• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/base/bits.h"
6 #include "src/compiler/backend/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
8 #include "src/compiler/node-properties.h"
9 
10 namespace v8 {
11 namespace internal {
12 namespace compiler {
13 
14 #define TRACE_UNIMPL() \
15   PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
16 
17 #define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
18 
// Adds Mips-specific methods for generating InstructionOperands.
class MipsOperandGenerator final : public OperandGenerator {
 public:
  explicit MipsOperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  // Returns an immediate operand when |node| is a constant that fits the
  // immediate field of |opcode|; otherwise a register operand.
  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Use the zero register if the node has the immediate value zero, otherwise
  // assign a register.
  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
    // Float constants are compared via their bit pattern, so -0.0 (which has
    // non-zero bits) is deliberately not treated as zero here.
    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
        (IsFloatConstant(node) &&
         (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // True only for Int32Constant — the sole integer-constant opcode handled on
  // mips32.
  bool IsIntegerConstant(Node* node) {
    return (node->opcode() == IrOpcode::kInt32Constant);
  }

  // Returns the Int32Constant value, sign-extended to 64 bits.
  int64_t GetIntegerConstantValue(Node* node) {
    DCHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
    return OpParameter<int32_t>(node->op());
  }

  bool IsFloatConstant(Node* node) {
    return (node->opcode() == IrOpcode::kFloat32Constant) ||
           (node->opcode() == IrOpcode::kFloat64Constant);
  }

  // Returns the Float32/Float64 constant value, widened to double.
  double GetFloatConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kFloat32Constant) {
      return OpParameter<float>(node->op());
    }
    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
    return OpParameter<double>(node->op());
  }

  // Returns whether |node| is a constant that can be encoded in the immediate
  // field of |opcode|. The acceptable range depends on the instruction class.
  bool CanBeImmediate(Node* node, InstructionCode opcode) {
    Int32Matcher m(node);
    if (!m.HasResolvedValue()) return false;
    int32_t value = m.ResolvedValue();
    switch (ArchOpcodeField::decode(opcode)) {
      case kMipsShl:
      case kMipsSar:
      case kMipsShr:
        // Shift amounts occupy a 5-bit field.
        return is_uint5(value);
      case kMipsAdd:
      case kMipsAnd:
      case kMipsOr:
      case kMipsTst:
      case kMipsSub:
      case kMipsXor:
        // ALU immediates are 16-bit fields.
        return is_uint16(value);
      case kMipsLb:
      case kMipsLbu:
      case kMipsSb:
      case kMipsLh:
      case kMipsLhu:
      case kMipsSh:
      case kMipsLw:
      case kMipsSw:
      case kMipsLwc1:
      case kMipsSwc1:
      case kMipsLdc1:
      case kMipsSdc1:
        // true even for 32b values, offsets > 16b
        // are handled in assembler-mips.cc
        return is_int32(value);
      default:
        return is_int16(value);
    }
  }

 private:
  // NOTE(review): appears unused in the visible portion of this file — likely
  // a leftover from the ARM port; confirm before removing.
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    TRACE_UNIMPL();
    return false;
  }
};
107 
VisitRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)108 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
109                      Node* node) {
110   MipsOperandGenerator g(selector);
111   selector->Emit(opcode, g.DefineAsRegister(node),
112                  g.UseRegister(node->InputAt(0)),
113                  g.UseRegister(node->InputAt(1)));
114 }
115 
VisitUniqueRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)116 static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
117                            Node* node) {
118   MipsOperandGenerator g(selector);
119   selector->Emit(opcode, g.DefineAsRegister(node),
120                  g.UseUniqueRegister(node->InputAt(0)),
121                  g.UseUniqueRegister(node->InputAt(1)));
122 }
123 
VisitRRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)124 void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
125   MipsOperandGenerator g(selector);
126   selector->Emit(
127       opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
128       g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
129 }
130 
VisitRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)131 static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
132                     Node* node) {
133   MipsOperandGenerator g(selector);
134   selector->Emit(opcode, g.DefineAsRegister(node),
135                  g.UseRegister(node->InputAt(0)));
136 }
137 
VisitRRI(InstructionSelector * selector,ArchOpcode opcode,Node * node)138 static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
139                      Node* node) {
140   MipsOperandGenerator g(selector);
141   int32_t imm = OpParameter<int32_t>(node->op());
142   selector->Emit(opcode, g.DefineAsRegister(node),
143                  g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
144 }
145 
VisitRRIR(InstructionSelector * selector,ArchOpcode opcode,Node * node)146 static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
147                       Node* node) {
148   MipsOperandGenerator g(selector);
149   int32_t imm = OpParameter<int32_t>(node->op());
150   selector->Emit(opcode, g.DefineAsRegister(node),
151                  g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
152                  g.UseRegister(node->InputAt(1)));
153 }
154 
VisitRRO(InstructionSelector * selector,ArchOpcode opcode,Node * node)155 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
156                      Node* node) {
157   MipsOperandGenerator g(selector);
158   selector->Emit(opcode, g.DefineAsRegister(node),
159                  g.UseRegister(node->InputAt(0)),
160                  g.UseOperand(node->InputAt(1), opcode));
161 }
162 
TryMatchImmediate(InstructionSelector * selector,InstructionCode * opcode_return,Node * node,size_t * input_count_return,InstructionOperand * inputs)163 bool TryMatchImmediate(InstructionSelector* selector,
164                        InstructionCode* opcode_return, Node* node,
165                        size_t* input_count_return, InstructionOperand* inputs) {
166   MipsOperandGenerator g(selector);
167   if (g.CanBeImmediate(node, *opcode_return)) {
168     *opcode_return |= AddressingModeField::encode(kMode_MRI);
169     inputs[0] = g.UseImmediate(node);
170     *input_count_return = 1;
171     return true;
172   }
173   return false;
174 }
175 
// Shared routine for word32 binary operations. Tries to encode the right
// operand as an immediate; for commutable operations (|has_reverse_opcode|)
// also tries the left operand with |reverse_opcode|; otherwise uses two
// registers. Emits with the given flags continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode,
                       FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[2];
  size_t input_count = 0;
  InstructionOperand outputs[1];
  size_t output_count = 0;

  // TryMatchImmediate fills inputs[1] and sets input_count to 1; the register
  // operand is then placed in inputs[0] and the count bumped to 2.
  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
                        &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (has_reverse_opcode &&
             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
  }

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure that
    // the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }

  DCHECK_NE(0u, input_count);
  DCHECK_EQ(1u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
219 
// Convenience overload: binop with reverse opcode but no flags continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
}
226 
// Convenience overload: non-commutable binop with a flags continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  VisitBinop(selector, node, opcode, false, kArchNop, cont);
}
231 
// Convenience overload: non-commutable binop, no flags continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  VisitBinop(selector, node, opcode, false, kArchNop);
}
236 
// Shared routine for 64-bit (paired) atomic read-modify-write operations.
// Inputs are fixed to a1/a2 and outputs to v0/v1; a0 is reserved as a temp.
// Unused result halves are still claimed as fixed temps so the allocator
// keeps those registers free for the code generator.
static void VisitPairAtomicBinop(InstructionSelector* selector, Node* node,
                                 ArchOpcode opcode) {
  MipsOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);       // low word of the operand
  Node* value_high = node->InputAt(3);  // high word of the operand
  AddressingMode addressing_mode = kMode_None;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
                                 g.UseFixed(value, a1),
                                 g.UseFixed(value_high, a2)};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  InstructionOperand temps[3];
  size_t temp_count = 0;
  temps[temp_count++] = g.TempRegister(a0);

  // Each projection present becomes a fixed output; otherwise reserve the
  // register as a temp.
  Node* projection0 = NodeProperties::FindProjection(node, 0);
  Node* projection1 = NodeProperties::FindProjection(node, 1);
  if (projection0) {
    outputs[output_count++] = g.DefineAsFixed(projection0, v0);
  } else {
    temps[temp_count++] = g.TempRegister(v0);
  }
  if (projection1) {
    outputs[output_count++] = g.DefineAsFixed(projection1, v1);
  } else {
    temps[temp_count++] = g.TempRegister(v1);
  }
  selector->Emit(code, output_count, outputs, arraysize(inputs), inputs,
                 temp_count, temps);
}
270 
VisitStackSlot(Node * node)271 void InstructionSelector::VisitStackSlot(Node* node) {
272   StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
273   int alignment = rep.alignment();
274   int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
275   OperandGenerator g(this);
276 
277   Emit(kArchStackSlot, g.DefineAsRegister(node),
278        sequence()->AddImmediate(Constant(slot)), 0, nullptr);
279 }
280 
VisitAbortCSADcheck(Node * node)281 void InstructionSelector::VisitAbortCSADcheck(Node* node) {
282   MipsOperandGenerator g(this);
283   Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
284 }
285 
// Lowers SIMD load-transform operations (load-and-splat, load-and-extend) to
// the corresponding MSA opcodes.
void InstructionSelector::VisitLoadTransform(Node* node) {
  LoadTransformParameters params = LoadTransformParametersOf(node->op());
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  InstructionCode opcode = kArchNop;
  switch (params.transformation) {
    case LoadTransformation::kS128Load8Splat:
      opcode = kMipsS128Load8Splat;
      break;
    case LoadTransformation::kS128Load16Splat:
      opcode = kMipsS128Load16Splat;
      break;
    case LoadTransformation::kS128Load32Splat:
      opcode = kMipsS128Load32Splat;
      break;
    case LoadTransformation::kS128Load64Splat:
      opcode = kMipsS128Load64Splat;
      break;
    case LoadTransformation::kS128Load8x8S:
      opcode = kMipsS128Load8x8S;
      break;
    case LoadTransformation::kS128Load8x8U:
      opcode = kMipsS128Load8x8U;
      break;
    case LoadTransformation::kS128Load16x4S:
      opcode = kMipsS128Load16x4S;
      break;
    case LoadTransformation::kS128Load16x4U:
      opcode = kMipsS128Load16x4U;
      break;
    case LoadTransformation::kS128Load32x2S:
      opcode = kMipsS128Load32x2S;
      break;
    case LoadTransformation::kS128Load32x2U:
      opcode = kMipsS128Load32x2U;
      break;
    default:
      UNIMPLEMENTED();
  }

  // Fold the index into the immediate field when possible; otherwise compute
  // base + index into a temp register and load with offset 0.
  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
340 
// Selects the load instruction matching the machine representation of the
// loaded value, then emits it in base+immediate form when the index fits,
// or via an explicit base+index add otherwise.
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  InstructionCode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMipsLwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsLdc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsLw;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMipsMsaLd;
      break;
    // Representations below are not valid on a 32-bit, non-pointer-
    // compression target.
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kSandboxedPointer:   // Fall through.
    case MachineRepresentation::kWord64:             // Fall through.
    case MachineRepresentation::kMapWord:            // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
392 
// Trap-handler-protected loads are not implemented on mips32.
void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
397 
// Emits a store, routing tagged stores that need a write barrier to the
// kArchStoreWithWriteBarrier stub and everything else to a plain store whose
// opcode matches the stored representation.
void InstructionSelector::VisitStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // Testing flag: force a full barrier on every tagged store.
  if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
    write_barrier_kind = kFullWriteBarrier;
  }

  // TODO(mips): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
    DCHECK(CanBeTaggedPointer(rep));
    InstructionOperand inputs[3];
    size_t input_count = 0;
    // Unique registers so none of the barrier's operands share a register.
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode =
        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kMipsSwc1;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kMipsSdc1;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kMipsSb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kMipsSh;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
      case MachineRepresentation::kWord32:
        opcode = kMipsSw;
        break;
      case MachineRepresentation::kSimd128:
        opcode = kMipsMsaSt;
        break;
      // Not representable on a 32-bit, non-pointer-compression target.
      case MachineRepresentation::kCompressedPointer:  // Fall through.
      case MachineRepresentation::kCompressed:         // Fall through.
      case MachineRepresentation::kSandboxedPointer:   // Fall through.
      case MachineRepresentation::kWord64:             // Fall through.
      case MachineRepresentation::kMapWord:            // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
    }

    // Fold the index into the immediate when it fits; otherwise add base and
    // index into a temp first. A zero value can use the zero register.
    if (g.CanBeImmediate(index, opcode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index),
           g.UseRegisterOrImmediateZero(value));
    } else {
      InstructionOperand addr_reg = g.TempRegister();
      Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
           g.UseRegister(index), g.UseRegister(base));
      // Emit desired store opcode, using temp addr_reg.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
    }
  }
}
475 
// Trap-handler-protected stores are not implemented on mips32.
void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
480 
// SIMD load-lane is not implemented on mips32.
void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
482 
// SIMD store-lane is not implemented on mips32.
void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
484 
// Lowers Word32And, recognizing two bitfield patterns:
//  - And(Shr(x, imm), contiguous-low-mask)  -> Ext (bitfield extract)
//  - And(x, mask-with-contiguous-low-zeros) -> Ins of zeros (clears low bits)
// Falls back to the generic And otherwise.
void InstructionSelector::VisitWord32And(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasResolvedValue()) {
    uint32_t mask = m.right().ResolvedValue();
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasResolvedValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;

        // Ext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Ext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        if (lsb == 0 && mask_width == 32) {
          // The whole word is kept — the And is a no-op.
          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
        } else {
          Emit(kMipsExt, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
               g.TempImmediate(mask_width));
        }
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasResolvedValue()) {
    uint32_t mask = m.right().ResolvedValue();
    uint32_t shift = base::bits::CountPopulation(~mask);
    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
    if (shift != 0 && shift != 32 && msb + shift == 32) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask.
      Emit(kMipsIns, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
           g.TempImmediate(0), g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kMipsAnd, true, kMipsAnd);
}
536 
// Or is commutable, so the same opcode serves as its own reverse.
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kMipsOr, true, kMipsOr);
}
540 
// Lowers Word32Xor, matching bit-negation patterns onto Nor:
//  - Xor(Or(a, b), -1) -> Nor(a, b)
//  - Xor(a, -1)        -> Nor(a, 0)
void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int32BinopMatcher mleft(m.left().node());
    // Only when the inner Or has no constant operand (otherwise the generic
    // path can fold the constant into an immediate).
    if (!mleft.right().HasResolvedValue()) {
      MipsOperandGenerator g(this);
      Emit(kMipsNor, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    MipsOperandGenerator g(this);
    Emit(kMipsNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kMipsXor, true, kMipsXor);
}
563 
// Lowers Word32Shl. When shifting an And with a contiguous low mask and the
// shift pushes the whole mask to (or beyond) the top bit, the And is
// redundant and only the shift is emitted.
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    MipsOperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
    // contiguous, and the shift immediate non-zero.
    if (mleft.right().HasResolvedValue()) {
      uint32_t mask = mleft.right().ResolvedValue();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().ResolvedValue();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMipsShl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMipsShl, node);
}
593 
// Lowers Word32Shr, matching Shr(And(x, mask), imm) onto a single Ext when
// the shifted mask is contiguous and lands in the least-significant bits.
void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
    uint32_t lsb = m.right().ResolvedValue() & 0x1F;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasResolvedValue() &&
        mleft.right().ResolvedValue() != 0) {
      // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      // Clear the mask's low |lsb| bits: those are shifted out anyway.
      uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        MipsOperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kMipsExt, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMipsShr, node);
}
618 
// Lowers Word32Sar. On r2/r6, Sar(Shl(x, K), K) with K == 16 or 24 is a
// sign-extension of the low halfword/byte and maps to Seh/Seb.
void InstructionSelector::VisitWord32Sar(Node* node) {
  Int32BinopMatcher m(node);
  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
      m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
      MipsOperandGenerator g(this);
      uint32_t sar = m.right().ResolvedValue();
      uint32_t shl = mleft.right().ResolvedValue();
      if ((sar == shl) && (sar == 16)) {
        // shl 16 + sar 16: sign-extend halfword.
        Emit(kMipsSeh, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        // shl 24 + sar 24: sign-extend byte.
        Emit(kMipsSeb, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      }
    }
  }
  VisitRRO(this, kMipsSar, node);
}
641 
VisitInt32PairBinop(InstructionSelector * selector,InstructionCode pair_opcode,InstructionCode single_opcode,Node * node)642 static void VisitInt32PairBinop(InstructionSelector* selector,
643                                 InstructionCode pair_opcode,
644                                 InstructionCode single_opcode, Node* node) {
645   MipsOperandGenerator g(selector);
646 
647   Node* projection1 = NodeProperties::FindProjection(node, 1);
648 
649   if (projection1) {
650     // We use UseUniqueRegister here to avoid register sharing with the output
651     // register.
652     InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
653                                    g.UseUniqueRegister(node->InputAt(1)),
654                                    g.UseUniqueRegister(node->InputAt(2)),
655                                    g.UseUniqueRegister(node->InputAt(3))};
656 
657     InstructionOperand outputs[] = {
658         g.DefineAsRegister(node),
659         g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
660     selector->Emit(pair_opcode, 2, outputs, 4, inputs);
661   } else {
662     // The high word of the result is not used, so we emit the standard 32 bit
663     // instruction.
664     selector->Emit(single_opcode, g.DefineSameAsFirst(node),
665                    g.UseRegister(node->InputAt(0)),
666                    g.UseRegister(node->InputAt(2)));
667   }
668 }
669 
// 64-bit add on a 32-bit target: pair opcode, or plain Add if only the low
// word is used.
void InstructionSelector::VisitInt32PairAdd(Node* node) {
  VisitInt32PairBinop(this, kMipsAddPair, kMipsAdd, node);
}
673 
// 64-bit subtract on a 32-bit target: pair opcode, or plain Sub if only the
// low word is used.
void InstructionSelector::VisitInt32PairSub(Node* node) {
  VisitInt32PairBinop(this, kMipsSubPair, kMipsSub, node);
}
677 
// 64-bit multiply on a 32-bit target: pair opcode, or plain Mul if only the
// low word is used.
void InstructionSelector::VisitInt32PairMul(Node* node) {
  VisitInt32PairBinop(this, kMipsMulPair, kMipsMul, node);
}
681 
// Shared routine for multiple shift operations.
// Emits a pair (64-bit) shift: inputs are the low/high words plus the shift
// amount (immediate when constant). The low output is always defined; the
// high output is defined only if its projection is used, otherwise a temp
// register stands in for it.
static void VisitWord32PairShift(InstructionSelector* selector,
                                 InstructionCode opcode, Node* node) {
  MipsOperandGenerator g(selector);
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasResolvedValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  // We use UseUniqueRegister here to avoid register sharing with the output
  // register.
  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  Node* projection1 = NodeProperties::FindProjection(node, 1);

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  int32_t output_count = 0;
  int32_t temp_count = 0;

  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1) {
    outputs[output_count++] = g.DefineAsRegister(projection1);
  } else {
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
716 
// 64-bit shift-left on a 32-bit target.
void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitWord32PairShift(this, kMipsShlPair, node);
}
720 
// 64-bit logical shift-right on a 32-bit target.
void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitWord32PairShift(this, kMipsShrPair, node);
}
724 
// 64-bit arithmetic shift-right on a 32-bit target.
void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitWord32PairShift(this, kMipsSarPair, node);
}
728 
// NOTE(review): presumably rotate-left is canonicalized to rotate-right
// before instruction selection (only Ror is lowered in this file) — confirm
// against the machine-operator reducer.
void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
730 
VisitWord32Ror(Node * node)731 void InstructionSelector::VisitWord32Ror(Node* node) {
732   VisitRRO(this, kMipsRor, node);
733 }
734 
VisitWord32Clz(Node * node)735 void InstructionSelector::VisitWord32Clz(Node* node) {
736   VisitRR(this, kMipsClz, node);
737 }
738 
// Atomic 64-bit (word-pair) load. The two result words are pinned to v0/v1
// and a0 is reserved as a fixed temp (presumably for address computation in
// the code generator — confirm there).
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kMipsWord32AtomicPairLoad;
  AddressingMode addressing_mode = kMode_MRI;
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
  InstructionOperand temps[3];
  size_t temp_count = 0;
  temps[temp_count++] = g.TempRegister(a0);
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // Each result word is only defined as an output if its projection is used;
  // otherwise the fixed register is claimed as a temp so the register
  // allocator still treats it as clobbered.
  Node* projection0 = NodeProperties::FindProjection(node, 0);
  Node* projection1 = NodeProperties::FindProjection(node, 1);
  if (projection0) {
    outputs[output_count++] = g.DefineAsFixed(projection0, v0);
  } else {
    temps[temp_count++] = g.TempRegister(v0);
  }
  if (projection1) {
    outputs[output_count++] = g.DefineAsFixed(projection1, v1);
  } else {
    temps[temp_count++] = g.TempRegister(v1);
  }
  Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count,
       temps);
}
768 
// Atomic 64-bit (word-pair) store. The two value words are pinned to a1/a2;
// a0 plus two arbitrary registers are reserved as temps. No outputs.
void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value_low = node->InputAt(2);
  Node* value_high = node->InputAt(3);

  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
                                 g.UseFixed(value_low, a1),
                                 g.UseFixed(value_high, a2)};
  InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(),
                                g.TempRegister()};
  Emit(kMipsWord32AtomicPairStore | AddressingModeField::encode(kMode_MRI), 0,
       nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
}
784 
// Atomic word-pair read-modify-write operations; all share the
// VisitPairAtomicBinop helper (defined earlier in this file) and differ only
// in the emitted arch opcode.
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairAdd);
}

void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairSub);
}

void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairAnd);
}

void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairOr);
}

void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairXor);
}

void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairExchange);
}
808 
// Atomic word-pair compare-and-exchange. Inputs 2-4 are pinned to a1/a2/a3
// and input 5 gets an arbitrary unique register (NOTE(review): which words
// are "expected" vs "new" is fixed by the code generator — confirm there).
// Results are pinned to v0/v1, as in VisitWord32AtomicPairLoad.
void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
  MipsOperandGenerator g(this);
  InstructionOperand inputs[] = {
      g.UseRegister(node->InputAt(0)),  g.UseRegister(node->InputAt(1)),
      g.UseFixed(node->InputAt(2), a1), g.UseFixed(node->InputAt(3), a2),
      g.UseFixed(node->InputAt(4), a3), g.UseUniqueRegister(node->InputAt(5))};

  InstructionCode code = kMipsWord32AtomicPairCompareExchange |
                         AddressingModeField::encode(kMode_MRI);
  Node* projection0 = NodeProperties::FindProjection(node, 0);
  Node* projection1 = NodeProperties::FindProjection(node, 1);
  InstructionOperand outputs[2];
  size_t output_count = 0;
  InstructionOperand temps[3];
  size_t temp_count = 0;
  temps[temp_count++] = g.TempRegister(a0);
  // Unused projections still claim their fixed result register as a temp so
  // the register allocator knows it is clobbered.
  if (projection0) {
    outputs[output_count++] = g.DefineAsFixed(projection0, v0);
  } else {
    temps[temp_count++] = g.TempRegister(v0);
  }
  if (projection1) {
    outputs[output_count++] = g.DefineAsFixed(projection1, v1);
  } else {
    temps[temp_count++] = g.TempRegister(v1);
  }
  Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count,
       temps);
}
838 
// Bit reversal and 64-bit/SIMD byte swaps are not selected on MIPS32.
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }

// 32-bit byte swap has a dedicated pseudo-instruction.
void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsByteSwap32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
  UNREACHABLE();
}
852 
// Count trailing zeros.
void InstructionSelector::VisitWord32Ctz(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

// Population count (number of set bits).
void InstructionSelector::VisitWord32Popcnt(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsPopcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
862 
// Int32 addition. On r6, tries to fuse an add with a covered constant shift
// into a single Lsa (shift-and-add); otherwise falls through to the shared
// binop visitor.
void InstructionSelector::VisitInt32Add(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);

  if (IsMipsArchVariant(kMips32r6)) {
    // Select Lsa for (left + (left_of_right << imm)).
    if (m.right().opcode() == IrOpcode::kWord32Shl &&
        CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
      Int32BinopMatcher mright(m.right().node());
      // Only fold when the shift amount is a constant in [1, 31] and the
      // other addend is not itself a constant.
      if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
        int32_t shift_value =
            static_cast<int32_t>(mright.right().ResolvedValue());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMipsLsa, g.DefineAsRegister(node),
               g.UseRegister(m.left().node()),
               g.UseRegister(mright.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }

    // Select Lsa for ((left_of_left << imm) + right).  Mirror of the case
    // above with the operands swapped.
    if (m.left().opcode() == IrOpcode::kWord32Shl &&
        CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
        int32_t shift_value =
            static_cast<int32_t>(mleft.right().ResolvedValue());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMipsLsa, g.DefineAsRegister(node),
               g.UseRegister(m.right().node()),
               g.UseRegister(mleft.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }
  }

  VisitBinop(this, node, kMipsAdd, true, kMipsAdd);
}
905 
// Int32 subtraction: delegates to the shared binop visitor.
void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMipsSub);
}
909 
// Int32 multiplication with strength reduction for positive constant
// multipliers: x * 2^k -> shift, x * (2^k + 1) -> Lsa (r6 only),
// x * (2^k - 1) -> shift + subtract. Otherwise a plain multiply.
void InstructionSelector::VisitInt32Mul(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
    uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
    // x * 2^k == x << k.
    if (base::bits::IsPowerOfTwo(value)) {
      Emit(kMipsShl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
      return;
    }
    // x * (2^k + 1) == (x << k) + x, via Lsa on r6 when k is in [1, 31].
    if (base::bits::IsPowerOfTwo(value - 1) && IsMipsArchVariant(kMips32r6) &&
        value - 1 > 0 && value - 1 <= 31) {
      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
      return;
    }
    // x * (2^k - 1) == (x << k) - x.
    if (base::bits::IsPowerOfTwo(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
      Emit(kMipsSub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitRRR(this, kMipsMul, node);
}
940 
// High 32 bits of a signed 32x32 -> 64 multiply.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMipsMulHigh, node);
}

// High 32 bits of an unsigned 32x32 -> 64 multiply.
void InstructionSelector::VisitUint32MulHigh(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsMulHighU, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
}
950 
VisitInt32Div(Node * node)951 void InstructionSelector::VisitInt32Div(Node* node) {
952   MipsOperandGenerator g(this);
953   Int32BinopMatcher m(node);
954   Emit(kMipsDiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
955        g.UseRegister(m.right().node()));
956 }
957 
VisitUint32Div(Node * node)958 void InstructionSelector::VisitUint32Div(Node* node) {
959   MipsOperandGenerator g(this);
960   Int32BinopMatcher m(node);
961   Emit(kMipsDivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
962        g.UseRegister(m.right().node()));
963 }
964 
VisitInt32Mod(Node * node)965 void InstructionSelector::VisitInt32Mod(Node* node) {
966   MipsOperandGenerator g(this);
967   Int32BinopMatcher m(node);
968   Emit(kMipsMod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
969        g.UseRegister(m.right().node()));
970 }
971 
VisitUint32Mod(Node * node)972 void InstructionSelector::VisitUint32Mod(Node* node) {
973   MipsOperandGenerator g(this);
974   Int32BinopMatcher m(node);
975   Emit(kMipsModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
976        g.UseRegister(m.right().node()));
977 }
978 
// Simple int/float conversions: each maps 1:1 to a single FPU convert
// instruction via the shared RR visitor.
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDS, node);
}

void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kMipsCvtSW, node);
}

void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kMipsCvtSUw, node);
}

void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDW, node);
}

void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDUw, node);
}
998 
// float32 -> int32 truncation. The MiscField bit tells the code generator to
// saturate overflow to the minimum value when the operator requests it.
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  MipsOperandGenerator g(this);
  InstructionCode opcode = kMipsTruncWS;
  TruncateKind kind = OpParameter<TruncateKind>(node->op());
  if (kind == TruncateKind::kSetOverflowToMin) {
    opcode |= MiscField::encode(true);
  }

  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
1009 
// float32 -> uint32 truncation; same overflow-to-min encoding as the signed
// variant above.
void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  MipsOperandGenerator g(this);
  InstructionCode opcode = kMipsTruncUwS;
  TruncateKind kind = OpParameter<TruncateKind>(node->op());
  if (kind == TruncateKind::kSetOverflowToMin) {
    opcode |= MiscField::encode(true);
  }

  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
1020 
// float64 -> int32 conversion, fusing a covered rounding operation into a
// single round-to-integer instruction where possible.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  MipsOperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMipsFloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMipsCeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMipsRoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMipsTruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        // and fuse into the single-precision round-to-word instruction.
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMipsFloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMipsCeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMipsRoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMipsTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            // No round op to fuse: truncate directly from the float32 input,
            // skipping the widening to float64.
            Emit(kMipsTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMipsTruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  // No fusable producer: plain double -> word truncation.
  VisitRR(this, kMipsTruncWD, node);
}
1083 
// Both the checked change and the plain truncation lower to the same
// double -> uint32 truncate instruction.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMipsTruncUwD, node);
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kMipsTruncUwD, node);
}
1091 
// float64 -> float32 truncation. When the input is a covered
// int32 -> float64 change, convert the int32 straight to float32 instead of
// going through the double.
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  MipsOperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
  // instruction.
  if (CanCover(node, value) &&
      value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
    Emit(kMipsCvtSW, g.DefineAsRegister(node),
         g.UseRegister(value->InputAt(0)));
    return;
  }
  VisitRR(this, kMipsCvtSD, node);
}
1105 
// JS-style double -> word32 truncation (architecture-independent opcode).
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kMipsTruncWD, node);
}

// Bitcast float32 -> int32 is implemented by extracting the low word of the
// FP register (NOTE(review): assumes the float32 bits live in the low word —
// confirm against the code generator).
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kMipsFloat64ExtractLowWord32, node);
}
1117 
// Bitcast int32 -> float32 by inserting into the low word of an FP register.
// The inline 0 immediate is an extra operand consumed by the code generator
// (NOTE(review): presumably selects the insert variant — confirm there).
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE_INT32, 0),
       g.UseRegister(node->InputAt(0)));
}
1124 
// float32 addition; on r2 fuses a covered multiply into Madd.S.
void InstructionSelector::VisitFloat32Add(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
    Float32BinopMatcher m(node);
    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
      // For Add.S(Mul.S(x, y), z):
      Float32BinopMatcher mleft(m.left().node());
      Emit(kMipsMaddS, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
    if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
      // For Add.S(x, Mul.S(y, z)):
      Float32BinopMatcher mright(m.right().node());
      Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsAddS, node);
}
1148 
// float64 addition; on r2 fuses a covered multiply into Madd.D.
void InstructionSelector::VisitFloat64Add(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(z, x, y).
    Float64BinopMatcher m(node);
    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
      // For Add.D(Mul.D(x, y), z):
      Float64BinopMatcher mleft(m.left().node());
      Emit(kMipsMaddD, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
    if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
      // For Add.D(x, Mul.D(y, z)):
      Float64BinopMatcher mright(m.right().node());
      Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsAddD, node);
}
1172 
// float32 subtraction; on r2 fuses a covered multiply on the left into
// Msub.S.
void InstructionSelector::VisitFloat32Sub(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Msub.S(z, x, y).
    Float32BinopMatcher m(node);
    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
      // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
      Float32BinopMatcher mleft(m.left().node());
      Emit(kMipsMsubS, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsSubS, node);
}
1188 
// float64 subtraction; on r2 fuses a covered multiply on the left into
// Msub.D.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Msub.D(z, x, y).
    Float64BinopMatcher m(node);
    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
      // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
      Float64BinopMatcher mleft(m.left().node());
      Emit(kMipsMsubD, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsSubD, node);
}
1204 
// Plain FP multiply and divide: each maps 1:1 to a single instruction.
void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMipsMulS, node);
}

void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMipsMulD, node);
}

void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMipsDivS, node);
}

void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMipsDivD, node);
}
1220 
// float64 modulus is emitted as a call (MarkAsCall) with the standard FP
// argument registers f12/f14 and result f0 — presumably a runtime/libc fmod
// call; confirm in the code generator.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
       g.UseFixed(node->InputAt(1), f14))
      ->MarkAsCall();
}
1227 
// FP min/max: dedicated pseudo-instructions taking both operands in
// registers.
void InstructionSelector::VisitFloat32Max(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat32Max, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Max(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64Max, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat32Min(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat32Min, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Min(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64Min, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
1251 
// FP absolute value and square root: single-instruction RR forms.
void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kMipsAbsS, node);
}

void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kMipsAbsD, node);
}

void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMipsSqrtS, node);
}

void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMipsSqrtD, node);
}
1267 
// FP rounding operations: one instruction per rounding mode. Ties-away is
// not selected on this target.
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kMipsFloat32RoundDown, node);
}

void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kMipsFloat64RoundDown, node);
}

void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kMipsFloat32RoundUp, node);
}

void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kMipsFloat64RoundUp, node);
}

void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kMipsFloat32RoundTruncate, node);
}

void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kMipsFloat64RoundTruncate, node);
}

void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kMipsFloat32RoundTiesEven, node);
}

void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kMipsFloat64RoundTiesEven, node);
}
1303 
// FP negation: single-instruction RR forms.
void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kMipsNegS, node);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kMipsNegD, node);
}
1311 
// IEEE 754 double binops (pow, atan2, ...) are emitted as calls with fixed
// FP argument registers f2/f4 and result f0.
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  MipsOperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
       g.UseFixed(node->InputAt(1), f4))
      ->MarkAsCall();
}

// IEEE 754 double unops (sin, log, ...) are emitted as calls with the
// argument in f12 and result in f0.
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  MipsOperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
      ->MarkAsCall();
}
1326 
// Emits the stack setup for a call's pushed arguments. C calls reserve the
// frame via kArchPrepareCallCFunction and poke arguments after the C
// argument-slot area; other calls claim the space explicitly and store each
// argument at its slot index.
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  MipsOperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments. Slots start past the reserved C argument
    // slots (kCArgSlotCount).
    int slot = kCArgSlotCount;
    for (PushParameter input : (*arguments)) {
      if (input.node) {
        Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(slot << kSystemPointerSizeLog2));
        ++slot;
      }
    }
  } else {
    // Possibly align stack here for functions.
    int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
    if (push_count > 0) {
      // Calculate needed space: a single claim covering every pushed
      // parameter (alignment holes have a null node and are skipped).
      int stack_size = 0;
      for (size_t n = 0; n < arguments->size(); ++n) {
        PushParameter input = (*arguments)[n];
        if (input.node) {
          stack_size += input.location.GetSizeInPointers();
        }
      }
      Emit(kMipsStackClaim, g.NoOutput(),
           g.TempImmediate(stack_size << kSystemPointerSizeLog2));
    }
    // Store each argument into the claimed area at its slot index.
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node) {
        Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(n << kSystemPointerSizeLog2));
      }
    }
  }
}
1371 
// Emits kMipsPeek instructions that read caller-frame-slot call results back
// into registers, tagging each result node with its machine representation.
void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  MipsOperandGenerator g(this);

  for (PushParameter output : *results) {
    // Register-allocated results need no stack read.
    if (!output.location.IsCallerFrameSlot()) continue;
    // Skip any alignment holes in nodes.
    if (output.node != nullptr) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      if (output.location.GetType() == MachineType::Float32()) {
        MarkAsFloat32(output.node);
      } else if (output.location.GetType() == MachineType::Float64()) {
        MarkAsFloat64(output.node);
      } else if (output.location.GetType() == MachineType::Simd128()) {
        MarkAsSimd128(output.node);
      }
      // Translate the location into a slot offset relative to the returns
      // area of the callee frame.
      int offset = call_descriptor->GetOffsetToReturns();
      int reverse_slot = -output.location.GetLocation() - offset;
      Emit(kMipsPeek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
  }
}
1396 
IsTailCallAddressImmediate()1397 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
1398 
// Selects an unaligned load. Byte loads need no special handling (kMipsLb/
// kMipsLbu); wider types use the unaligned (Ul*) variants. When the index is
// not encodable as an immediate, the address is materialized with an add
// into a temp register first.
void InstructionSelector::VisitUnalignedLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMipsUlhu : kMipsUlh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsUlw;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kMipsUlwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsUldc1;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMipsMsaLd;
      break;
    case MachineRepresentation::kBit:                // Fall through.
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kSandboxedPointer:   // Fall through.
    case MachineRepresentation::kWord64:             // Fall through.
    case MachineRepresentation::kMapWord:            // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
1450 
// Selects an unaligned store. Byte stores use the plain kMipsSb (bytes are
// never misaligned); wider types use the unaligned (Us*) variants. A zero
// constant value is stored directly from the zero register.
void InstructionSelector::VisitUnalignedStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());

  // TODO(mips): I guess this could be done in a better way.
  ArchOpcode opcode;
  switch (rep) {
    case MachineRepresentation::kFloat32:
      opcode = kMipsUswc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsUsdc1;
      break;
    case MachineRepresentation::kWord8:
      opcode = kMipsSb;
      break;
    case MachineRepresentation::kWord16:
      opcode = kMipsUsh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsUsw;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMipsMsaSt;
      break;
    case MachineRepresentation::kBit:                // Fall through.
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kSandboxedPointer:   // Fall through.
    case MachineRepresentation::kWord64:             // Fall through.
    case MachineRepresentation::kMapWord:            // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    // Index not encodable: materialize the address into a temp register.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}
1506 
1507 namespace {
1508 // Shared routine for multiple compare operations.
VisitCompare(InstructionSelector * selector,InstructionCode opcode,InstructionOperand left,InstructionOperand right,FlagsContinuation * cont)1509 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1510                          InstructionOperand left, InstructionOperand right,
1511                          FlagsContinuation* cont) {
1512   selector->EmitWithContinuation(opcode, left, right, cont);
1513 }
1514 
1515 // Shared routine for multiple float32 compare operations.
VisitFloat32Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1516 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1517                          FlagsContinuation* cont) {
1518   MipsOperandGenerator g(selector);
1519   Float32BinopMatcher m(node);
1520   InstructionOperand lhs, rhs;
1521 
1522   lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1523                           : g.UseRegister(m.left().node());
1524   rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1525                            : g.UseRegister(m.right().node());
1526   VisitCompare(selector, kMipsCmpS, lhs, rhs, cont);
1527 }
1528 
1529 // Shared routine for multiple float64 compare operations.
VisitFloat64Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1530 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1531                          FlagsContinuation* cont) {
1532   MipsOperandGenerator g(selector);
1533   Float64BinopMatcher m(node);
1534   InstructionOperand lhs, rhs;
1535 
1536   lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1537                           : g.UseRegister(m.left().node());
1538   rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1539                            : g.UseRegister(m.right().node());
1540   VisitCompare(selector, kMipsCmpD, lhs, rhs, cont);
1541 }
1542 
1543 // Shared routine for multiple word compare operations.
VisitWordCompare(InstructionSelector * selector,Node * node,InstructionCode opcode,FlagsContinuation * cont,bool commutative)1544 void VisitWordCompare(InstructionSelector* selector, Node* node,
1545                       InstructionCode opcode, FlagsContinuation* cont,
1546                       bool commutative) {
1547   MipsOperandGenerator g(selector);
1548   Node* left = node->InputAt(0);
1549   Node* right = node->InputAt(1);
1550 
1551   // Match immediates on left or right side of comparison.
1552   if (g.CanBeImmediate(right, opcode)) {
1553     if (opcode == kMipsTst) {
1554       VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
1555                    cont);
1556     } else {
1557       switch (cont->condition()) {
1558         case kEqual:
1559         case kNotEqual:
1560           if (cont->IsSet()) {
1561             VisitCompare(selector, opcode, g.UseRegister(left),
1562                          g.UseImmediate(right), cont);
1563           } else {
1564             VisitCompare(selector, opcode, g.UseRegister(left),
1565                          g.UseRegister(right), cont);
1566           }
1567           break;
1568         case kSignedLessThan:
1569         case kSignedGreaterThanOrEqual:
1570         case kUnsignedLessThan:
1571         case kUnsignedGreaterThanOrEqual:
1572           VisitCompare(selector, opcode, g.UseRegister(left),
1573                        g.UseImmediate(right), cont);
1574           break;
1575         default:
1576           VisitCompare(selector, opcode, g.UseRegister(left),
1577                        g.UseRegister(right), cont);
1578       }
1579     }
1580   } else if (g.CanBeImmediate(left, opcode)) {
1581     if (!commutative) cont->Commute();
1582     if (opcode == kMipsTst) {
1583       VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
1584                    cont);
1585     } else {
1586       switch (cont->condition()) {
1587         case kEqual:
1588         case kNotEqual:
1589           if (cont->IsSet()) {
1590             VisitCompare(selector, opcode, g.UseRegister(right),
1591                          g.UseImmediate(left), cont);
1592           } else {
1593             VisitCompare(selector, opcode, g.UseRegister(right),
1594                          g.UseRegister(left), cont);
1595           }
1596           break;
1597         case kSignedLessThan:
1598         case kSignedGreaterThanOrEqual:
1599         case kUnsignedLessThan:
1600         case kUnsignedGreaterThanOrEqual:
1601           VisitCompare(selector, opcode, g.UseRegister(right),
1602                        g.UseImmediate(left), cont);
1603           break;
1604         default:
1605           VisitCompare(selector, opcode, g.UseRegister(right),
1606                        g.UseRegister(left), cont);
1607       }
1608     }
1609   } else {
1610     VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
1611                  cont);
1612   }
1613 }
1614 
VisitWordCompare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1615 void VisitWordCompare(InstructionSelector* selector, Node* node,
1616                       FlagsContinuation* cont) {
1617   VisitWordCompare(selector, node, kMipsCmp, cont, false);
1618 }
1619 
1620 }  // namespace
1621 
VisitStackPointerGreaterThan(Node * node,FlagsContinuation * cont)1622 void InstructionSelector::VisitStackPointerGreaterThan(
1623     Node* node, FlagsContinuation* cont) {
1624   StackCheckKind kind = StackCheckKindOf(node->op());
1625   InstructionCode opcode =
1626       kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
1627 
1628   MipsOperandGenerator g(this);
1629 
1630   // No outputs.
1631   InstructionOperand* const outputs = nullptr;
1632   const int output_count = 0;
1633 
1634   // TempRegister(0) is used to store the comparison result.
1635   // Applying an offset to this stack check requires a temp register. Offsets
1636   // are only applied to the first stack check. If applying an offset, we must
1637   // ensure the input and temp registers do not alias, thus kUniqueRegister.
1638   InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
1639   const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 2 : 1);
1640   const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
1641                                  ? OperandGenerator::kUniqueRegister
1642                                  : OperandGenerator::kRegister;
1643 
1644   Node* const value = node->InputAt(0);
1645   InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
1646   static constexpr int input_count = arraysize(inputs);
1647 
1648   EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
1649                        temp_count, temps, cont);
1650 }
1651 
// Shared routine for word comparisons against zero. Tries to fuse |value|'s
// defining operation into the flags continuation instead of materializing a
// boolean and comparing it against zero.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  // Fusing is only safe when this is the sole/effective user of |value|.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        // Float comparisons use the unsigned condition codes here, matching
        // the conditions chosen by VisitFloat32LessThan below.
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (!result || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMipsAddOvf, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMipsSubOvf, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMipsMulOvf, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kWord32And:
        // (x & mask) != 0 maps onto a test instruction; Word32And is
        // commutative, hence the |true| for the commutative parameter.
        return VisitWordCompare(this, value, kMipsTst, cont, true);
      case IrOpcode::kStackPointerGreaterThan:
        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
        return VisitStackPointerGreaterThan(value, cont);
      default:
        break;
    }
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  MipsOperandGenerator g(this);
  InstructionOperand const value_operand = g.UseRegister(value);
  EmitWithContinuation(kMipsCmp, value_operand, g.TempImmediate(0), cont);
}
1743 
// Selects code for a multi-way switch: either a jump table (ArchTableSwitch)
// or a binary-search tree of compares (ArchBinarySearchSwitch), picked by a
// simple space/time cost model.
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  MipsOperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    // Rough cost estimates: a table costs one entry per value in the range
    // plus fixed overhead; a lookup costs two units per case. Time costs are
    // weighted by a factor of 3 when comparing the two strategies.
    size_t table_space_cost = 9 + sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 2 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      // Bias the value so the table is indexed from zero.
      if (sw.min_value()) {
        index_operand = g.TempRegister();
        Emit(kMipsSub, index_operand, value_operand,
             g.TempImmediate(sw.min_value()));
      }
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(std::move(sw), value_operand);
}
1774 
VisitWord32Equal(Node * const node)1775 void InstructionSelector::VisitWord32Equal(Node* const node) {
1776   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1777   Int32BinopMatcher m(node);
1778   if (m.right().Is(0)) {
1779     return VisitWordCompareZero(m.node(), m.left().node(), &cont);
1780   }
1781   VisitWordCompare(this, node, &cont);
1782 }
1783 
VisitInt32LessThan(Node * node)1784 void InstructionSelector::VisitInt32LessThan(Node* node) {
1785   FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
1786   VisitWordCompare(this, node, &cont);
1787 }
1788 
VisitInt32LessThanOrEqual(Node * node)1789 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
1790   FlagsContinuation cont =
1791       FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
1792   VisitWordCompare(this, node, &cont);
1793 }
1794 
VisitUint32LessThan(Node * node)1795 void InstructionSelector::VisitUint32LessThan(Node* node) {
1796   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
1797   VisitWordCompare(this, node, &cont);
1798 }
1799 
VisitUint32LessThanOrEqual(Node * node)1800 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
1801   FlagsContinuation cont =
1802       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
1803   VisitWordCompare(this, node, &cont);
1804 }
1805 
VisitInt32AddWithOverflow(Node * node)1806 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1807   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1808     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1809     return VisitBinop(this, node, kMipsAddOvf, &cont);
1810   }
1811   FlagsContinuation cont;
1812   VisitBinop(this, node, kMipsAddOvf, &cont);
1813 }
1814 
VisitInt32SubWithOverflow(Node * node)1815 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1816   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1817     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1818     return VisitBinop(this, node, kMipsSubOvf, &cont);
1819   }
1820   FlagsContinuation cont;
1821   VisitBinop(this, node, kMipsSubOvf, &cont);
1822 }
1823 
VisitInt32MulWithOverflow(Node * node)1824 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
1825   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1826     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1827     return VisitBinop(this, node, kMipsMulOvf, &cont);
1828   }
1829   FlagsContinuation cont;
1830   VisitBinop(this, node, kMipsMulOvf, &cont);
1831 }
1832 
VisitFloat32Equal(Node * node)1833 void InstructionSelector::VisitFloat32Equal(Node* node) {
1834   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1835   VisitFloat32Compare(this, node, &cont);
1836 }
1837 
VisitFloat32LessThan(Node * node)1838 void InstructionSelector::VisitFloat32LessThan(Node* node) {
1839   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
1840   VisitFloat32Compare(this, node, &cont);
1841 }
1842 
VisitFloat32LessThanOrEqual(Node * node)1843 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
1844   FlagsContinuation cont =
1845       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
1846   VisitFloat32Compare(this, node, &cont);
1847 }
1848 
VisitFloat64Equal(Node * node)1849 void InstructionSelector::VisitFloat64Equal(Node* node) {
1850   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1851   VisitFloat64Compare(this, node, &cont);
1852 }
1853 
VisitFloat64LessThan(Node * node)1854 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1855   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
1856   VisitFloat64Compare(this, node, &cont);
1857 }
1858 
VisitFloat64LessThanOrEqual(Node * node)1859 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1860   FlagsContinuation cont =
1861       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
1862   VisitFloat64Compare(this, node, &cont);
1863 }
1864 
VisitFloat64ExtractLowWord32(Node * node)1865 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
1866   MipsOperandGenerator g(this);
1867   Emit(kMipsFloat64ExtractLowWord32, g.DefineAsRegister(node),
1868        g.UseRegister(node->InputAt(0)));
1869 }
1870 
VisitFloat64ExtractHighWord32(Node * node)1871 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
1872   MipsOperandGenerator g(this);
1873   Emit(kMipsFloat64ExtractHighWord32, g.DefineAsRegister(node),
1874        g.UseRegister(node->InputAt(0)));
1875 }
1876 
VisitFloat64InsertLowWord32(Node * node)1877 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
1878   MipsOperandGenerator g(this);
1879   Node* left = node->InputAt(0);
1880   Node* right = node->InputAt(1);
1881   Emit(kMipsFloat64InsertLowWord32, g.DefineSameAsFirst(node),
1882        g.UseRegister(left), g.UseRegister(right));
1883 }
1884 
VisitFloat64InsertHighWord32(Node * node)1885 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
1886   MipsOperandGenerator g(this);
1887   Node* left = node->InputAt(0);
1888   Node* right = node->InputAt(1);
1889   Emit(kMipsFloat64InsertHighWord32, g.DefineSameAsFirst(node),
1890        g.UseRegister(left), g.UseRegister(right));
1891 }
1892 
VisitFloat64SilenceNaN(Node * node)1893 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
1894   MipsOperandGenerator g(this);
1895   Node* left = node->InputAt(0);
1896   InstructionOperand temps[] = {g.TempRegister()};
1897   Emit(kMipsFloat64SilenceNaN, g.DefineSameAsFirst(node), g.UseRegister(left),
1898        arraysize(temps), temps);
1899 }
1900 
VisitMemoryBarrier(Node * node)1901 void InstructionSelector::VisitMemoryBarrier(Node* node) {
1902   MipsOperandGenerator g(this);
1903   Emit(kMipsSync, g.NoOutput());
1904 }
1905 
// Selects an atomic 32-bit-or-narrower load. 8/16-bit loads distinguish
// sign- from zero-extension via the load representation's signedness.
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
  // TODO(mips-dev): Confirm whether there is any mips32 chip in use and
  // support atomic loads of tagged values with barriers.
  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
  LoadRepresentation load_rep = atomic_load_params.representation();
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kWord32:
      opcode = kAtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
  }

  // Use base+immediate addressing when the index fits; otherwise compute the
  // address into a temp register and load at offset 0.
  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
1944 
// Selects an atomic 32-bit-or-narrower store. The value may use the zero
// register when it is a zero constant.
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
  // TODO(mips-dev): Confirm whether there is any mips32 chip in use and
  // support atomic stores of tagged values with barriers.
  AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
  MachineRepresentation rep = store_params.representation();
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kAtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kAtomicStoreWord16;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kWord32:
      opcode = kAtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
  }

  // Use base+immediate addressing when the index fits; otherwise compute the
  // address into a temp register and store at offset 0.
  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}
1985 
VisitWord32AtomicExchange(Node * node)1986 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
1987   MipsOperandGenerator g(this);
1988   Node* base = node->InputAt(0);
1989   Node* index = node->InputAt(1);
1990   Node* value = node->InputAt(2);
1991   ArchOpcode opcode;
1992   MachineType type = AtomicOpType(node->op());
1993   if (type == MachineType::Int8()) {
1994     opcode = kAtomicExchangeInt8;
1995   } else if (type == MachineType::Uint8()) {
1996     opcode = kAtomicExchangeUint8;
1997   } else if (type == MachineType::Int16()) {
1998     opcode = kAtomicExchangeInt16;
1999   } else if (type == MachineType::Uint16()) {
2000     opcode = kAtomicExchangeUint16;
2001   } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2002     opcode = kAtomicExchangeWord32;
2003   } else {
2004     UNREACHABLE();
2005   }
2006 
2007   AddressingMode addressing_mode = kMode_MRI;
2008   InstructionOperand inputs[3];
2009   size_t input_count = 0;
2010   inputs[input_count++] = g.UseUniqueRegister(base);
2011   inputs[input_count++] = g.UseUniqueRegister(index);
2012   inputs[input_count++] = g.UseUniqueRegister(value);
2013   InstructionOperand outputs[1];
2014   outputs[0] = g.UseUniqueRegister(node);
2015   InstructionOperand temp[3];
2016   temp[0] = g.TempRegister();
2017   temp[1] = g.TempRegister();
2018   temp[2] = g.TempRegister();
2019   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2020   Emit(code, 1, outputs, input_count, inputs, 3, temp);
2021 }
2022 
VisitWord32AtomicCompareExchange(Node * node)2023 void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
2024   MipsOperandGenerator g(this);
2025   Node* base = node->InputAt(0);
2026   Node* index = node->InputAt(1);
2027   Node* old_value = node->InputAt(2);
2028   Node* new_value = node->InputAt(3);
2029   ArchOpcode opcode;
2030   MachineType type = AtomicOpType(node->op());
2031   if (type == MachineType::Int8()) {
2032     opcode = kAtomicCompareExchangeInt8;
2033   } else if (type == MachineType::Uint8()) {
2034     opcode = kAtomicCompareExchangeUint8;
2035   } else if (type == MachineType::Int16()) {
2036     opcode = kAtomicCompareExchangeInt16;
2037   } else if (type == MachineType::Uint16()) {
2038     opcode = kAtomicCompareExchangeUint16;
2039   } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2040     opcode = kAtomicCompareExchangeWord32;
2041   } else {
2042     UNREACHABLE();
2043   }
2044 
2045   AddressingMode addressing_mode = kMode_MRI;
2046   InstructionOperand inputs[4];
2047   size_t input_count = 0;
2048   inputs[input_count++] = g.UseUniqueRegister(base);
2049   inputs[input_count++] = g.UseUniqueRegister(index);
2050   inputs[input_count++] = g.UseUniqueRegister(old_value);
2051   inputs[input_count++] = g.UseUniqueRegister(new_value);
2052   InstructionOperand outputs[1];
2053   outputs[0] = g.UseUniqueRegister(node);
2054   InstructionOperand temp[3];
2055   temp[0] = g.TempRegister();
2056   temp[1] = g.TempRegister();
2057   temp[2] = g.TempRegister();
2058   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2059   Emit(code, 1, outputs, input_count, inputs, 3, temp);
2060 }
2061 
VisitWord32AtomicBinaryOperation(Node * node,ArchOpcode int8_op,ArchOpcode uint8_op,ArchOpcode int16_op,ArchOpcode uint16_op,ArchOpcode word32_op)2062 void InstructionSelector::VisitWord32AtomicBinaryOperation(
2063     Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2064     ArchOpcode uint16_op, ArchOpcode word32_op) {
2065   MipsOperandGenerator g(this);
2066   Node* base = node->InputAt(0);
2067   Node* index = node->InputAt(1);
2068   Node* value = node->InputAt(2);
2069   ArchOpcode opcode;
2070   MachineType type = AtomicOpType(node->op());
2071   if (type == MachineType::Int8()) {
2072     opcode = int8_op;
2073   } else if (type == MachineType::Uint8()) {
2074     opcode = uint8_op;
2075   } else if (type == MachineType::Int16()) {
2076     opcode = int16_op;
2077   } else if (type == MachineType::Uint16()) {
2078     opcode = uint16_op;
2079   } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2080     opcode = word32_op;
2081   } else {
2082     UNREACHABLE();
2083   }
2084 
2085   AddressingMode addressing_mode = kMode_MRI;
2086   InstructionOperand inputs[3];
2087   size_t input_count = 0;
2088   inputs[input_count++] = g.UseUniqueRegister(base);
2089   inputs[input_count++] = g.UseUniqueRegister(index);
2090   inputs[input_count++] = g.UseUniqueRegister(value);
2091   InstructionOperand outputs[1];
2092   outputs[0] = g.UseUniqueRegister(node);
2093   InstructionOperand temps[4];
2094   temps[0] = g.TempRegister();
2095   temps[1] = g.TempRegister();
2096   temps[2] = g.TempRegister();
2097   temps[3] = g.TempRegister();
2098   InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2099   Emit(code, 1, outputs, input_count, inputs, 4, temps);
2100 }
2101 
// Instantiates VisitWord32Atomic{Add,Sub,And,Or,Xor}: each dispatches to
// VisitWord32AtomicBinaryOperation with the matching per-width atomic opcodes.
#define VISIT_ATOMIC_BINOP(op)                                           \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {          \
    VisitWord32AtomicBinaryOperation(                                    \
        node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
        kAtomic##op##Uint16, kAtomic##op##Word32);                       \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
2114 
// Int32AbsWithOverflow is not expected to reach instruction selection on this
// target (no MIPS lowering is provided).
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}
2118 
// Int64AbsWithOverflow is not expected to reach instruction selection on this
// 32-bit target (no MIPS lowering is provided).
void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  UNREACHABLE();
}
2122 
// X-macro listing the SIMD lane shapes shared by the visitor tables below.
#define SIMD_TYPE_LIST(V) \
  V(F32x4)                \
  V(I32x4)                \
  V(I16x8)                \
  V(I8x16)
2128 
// (node opcode, MIPS instruction) pairs for SIMD operations with one input
// and one output register; expanded by SIMD_VISIT_UNOP into VisitRR lowerings.
#define SIMD_UNOP_LIST(V)                                      \
  V(F64x2Abs, kMipsF64x2Abs)                                   \
  V(F64x2Neg, kMipsF64x2Neg)                                   \
  V(F64x2Sqrt, kMipsF64x2Sqrt)                                 \
  V(F64x2Ceil, kMipsF64x2Ceil)                                 \
  V(F64x2Floor, kMipsF64x2Floor)                               \
  V(F64x2Trunc, kMipsF64x2Trunc)                               \
  V(F64x2NearestInt, kMipsF64x2NearestInt)                     \
  V(F64x2ConvertLowI32x4S, kMipsF64x2ConvertLowI32x4S)         \
  V(F64x2ConvertLowI32x4U, kMipsF64x2ConvertLowI32x4U)         \
  V(F64x2PromoteLowF32x4, kMipsF64x2PromoteLowF32x4)           \
  V(I64x2Neg, kMipsI64x2Neg)                                   \
  V(I64x2BitMask, kMipsI64x2BitMask)                           \
  V(I64x2Abs, kMipsI64x2Abs)                                   \
  V(I64x2SConvertI32x4Low, kMipsI64x2SConvertI32x4Low)         \
  V(I64x2SConvertI32x4High, kMipsI64x2SConvertI32x4High)       \
  V(I64x2UConvertI32x4Low, kMipsI64x2UConvertI32x4Low)         \
  V(I64x2UConvertI32x4High, kMipsI64x2UConvertI32x4High)       \
  V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4)               \
  V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4)               \
  V(F32x4Abs, kMipsF32x4Abs)                                   \
  V(F32x4Neg, kMipsF32x4Neg)                                   \
  V(F32x4Sqrt, kMipsF32x4Sqrt)                                 \
  V(F32x4RecipApprox, kMipsF32x4RecipApprox)                   \
  V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox)           \
  V(F32x4Ceil, kMipsF32x4Ceil)                                 \
  V(F32x4Floor, kMipsF32x4Floor)                               \
  V(F32x4Trunc, kMipsF32x4Trunc)                               \
  V(F32x4NearestInt, kMipsF32x4NearestInt)                     \
  V(F32x4DemoteF64x2Zero, kMipsF32x4DemoteF64x2Zero)           \
  V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4)               \
  V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4)               \
  V(I32x4Neg, kMipsI32x4Neg)                                   \
  V(I32x4BitMask, kMipsI32x4BitMask)                           \
  V(I32x4SConvertI16x8Low, kMipsI32x4SConvertI16x8Low)         \
  V(I32x4SConvertI16x8High, kMipsI32x4SConvertI16x8High)       \
  V(I32x4UConvertI16x8Low, kMipsI32x4UConvertI16x8Low)         \
  V(I32x4UConvertI16x8High, kMipsI32x4UConvertI16x8High)       \
  V(I32x4ExtAddPairwiseI16x8S, kMipsI32x4ExtAddPairwiseI16x8S) \
  V(I32x4ExtAddPairwiseI16x8U, kMipsI32x4ExtAddPairwiseI16x8U) \
  V(I32x4TruncSatF64x2SZero, kMipsI32x4TruncSatF64x2SZero)     \
  V(I32x4TruncSatF64x2UZero, kMipsI32x4TruncSatF64x2UZero)     \
  V(I16x8Neg, kMipsI16x8Neg)                                   \
  V(I16x8BitMask, kMipsI16x8BitMask)                           \
  V(I16x8SConvertI8x16Low, kMipsI16x8SConvertI8x16Low)         \
  V(I16x8SConvertI8x16High, kMipsI16x8SConvertI8x16High)       \
  V(I16x8UConvertI8x16Low, kMipsI16x8UConvertI8x16Low)         \
  V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High)       \
  V(I16x8ExtAddPairwiseI8x16S, kMipsI16x8ExtAddPairwiseI8x16S) \
  V(I16x8ExtAddPairwiseI8x16U, kMipsI16x8ExtAddPairwiseI8x16U) \
  V(I8x16Neg, kMipsI8x16Neg)                                   \
  V(I8x16Popcnt, kMipsI8x16Popcnt)                             \
  V(I8x16BitMask, kMipsI8x16BitMask)                           \
  V(S128Not, kMipsS128Not)                                     \
  V(I64x2AllTrue, kMipsI64x2AllTrue)                           \
  V(I32x4AllTrue, kMipsI32x4AllTrue)                           \
  V(I16x8AllTrue, kMipsI16x8AllTrue)                           \
  V(I8x16AllTrue, kMipsI8x16AllTrue)                           \
  V(V128AnyTrue, kMipsV128AnyTrue)
2188 
// SIMD shift operations; expanded by SIMD_VISIT_SHIFT_OP into VisitRRI
// lowerings (the shift amount is the immediate/third operand). The MIPS
// instruction name is derived from the node name, so only one column here.
#define SIMD_SHIFT_OP_LIST(V) \
  V(I64x2Shl)                 \
  V(I64x2ShrS)                \
  V(I64x2ShrU)                \
  V(I32x4Shl)                 \
  V(I32x4ShrS)                \
  V(I32x4ShrU)                \
  V(I16x8Shl)                 \
  V(I16x8ShrS)                \
  V(I16x8ShrU)                \
  V(I8x16Shl)                 \
  V(I8x16ShrS)                \
  V(I8x16ShrU)
2202 
// (node opcode, MIPS instruction) pairs for SIMD operations with two input
// registers and one output; expanded by SIMD_VISIT_BINOP into VisitRRR
// lowerings.
#define SIMD_BINOP_LIST(V)                             \
  V(F64x2Add, kMipsF64x2Add)                           \
  V(F64x2Sub, kMipsF64x2Sub)                           \
  V(F64x2Mul, kMipsF64x2Mul)                           \
  V(F64x2Div, kMipsF64x2Div)                           \
  V(F64x2Min, kMipsF64x2Min)                           \
  V(F64x2Max, kMipsF64x2Max)                           \
  V(F64x2Eq, kMipsF64x2Eq)                             \
  V(F64x2Ne, kMipsF64x2Ne)                             \
  V(F64x2Lt, kMipsF64x2Lt)                             \
  V(F64x2Le, kMipsF64x2Le)                             \
  V(I64x2Eq, kMipsI64x2Eq)                             \
  V(I64x2Ne, kMipsI64x2Ne)                             \
  V(I64x2Add, kMipsI64x2Add)                           \
  V(I64x2Sub, kMipsI64x2Sub)                           \
  V(I64x2Mul, kMipsI64x2Mul)                           \
  V(I64x2GtS, kMipsI64x2GtS)                           \
  V(I64x2GeS, kMipsI64x2GeS)                           \
  V(I64x2ExtMulLowI32x4S, kMipsI64x2ExtMulLowI32x4S)   \
  V(I64x2ExtMulHighI32x4S, kMipsI64x2ExtMulHighI32x4S) \
  V(I64x2ExtMulLowI32x4U, kMipsI64x2ExtMulLowI32x4U)   \
  V(I64x2ExtMulHighI32x4U, kMipsI64x2ExtMulHighI32x4U) \
  V(F32x4Add, kMipsF32x4Add)                           \
  V(F32x4Sub, kMipsF32x4Sub)                           \
  V(F32x4Mul, kMipsF32x4Mul)                           \
  V(F32x4Div, kMipsF32x4Div)                           \
  V(F32x4Max, kMipsF32x4Max)                           \
  V(F32x4Min, kMipsF32x4Min)                           \
  V(F32x4Eq, kMipsF32x4Eq)                             \
  V(F32x4Ne, kMipsF32x4Ne)                             \
  V(F32x4Lt, kMipsF32x4Lt)                             \
  V(F32x4Le, kMipsF32x4Le)                             \
  V(I32x4Add, kMipsI32x4Add)                           \
  V(I32x4Sub, kMipsI32x4Sub)                           \
  V(I32x4Mul, kMipsI32x4Mul)                           \
  V(I32x4MaxS, kMipsI32x4MaxS)                         \
  V(I32x4MinS, kMipsI32x4MinS)                         \
  V(I32x4MaxU, kMipsI32x4MaxU)                         \
  V(I32x4MinU, kMipsI32x4MinU)                         \
  V(I32x4Eq, kMipsI32x4Eq)                             \
  V(I32x4Ne, kMipsI32x4Ne)                             \
  V(I32x4GtS, kMipsI32x4GtS)                           \
  V(I32x4GeS, kMipsI32x4GeS)                           \
  V(I32x4GtU, kMipsI32x4GtU)                           \
  V(I32x4GeU, kMipsI32x4GeU)                           \
  V(I32x4Abs, kMipsI32x4Abs)                           \
  V(I32x4DotI16x8S, kMipsI32x4DotI16x8S)               \
  V(I32x4ExtMulLowI16x8S, kMipsI32x4ExtMulLowI16x8S)   \
  V(I32x4ExtMulHighI16x8S, kMipsI32x4ExtMulHighI16x8S) \
  V(I32x4ExtMulLowI16x8U, kMipsI32x4ExtMulLowI16x8U)   \
  V(I32x4ExtMulHighI16x8U, kMipsI32x4ExtMulHighI16x8U) \
  V(I16x8Add, kMipsI16x8Add)                           \
  V(I16x8AddSatS, kMipsI16x8AddSatS)                   \
  V(I16x8AddSatU, kMipsI16x8AddSatU)                   \
  V(I16x8Sub, kMipsI16x8Sub)                           \
  V(I16x8SubSatS, kMipsI16x8SubSatS)                   \
  V(I16x8SubSatU, kMipsI16x8SubSatU)                   \
  V(I16x8Mul, kMipsI16x8Mul)                           \
  V(I16x8MaxS, kMipsI16x8MaxS)                         \
  V(I16x8MinS, kMipsI16x8MinS)                         \
  V(I16x8MaxU, kMipsI16x8MaxU)                         \
  V(I16x8MinU, kMipsI16x8MinU)                         \
  V(I16x8Eq, kMipsI16x8Eq)                             \
  V(I16x8Ne, kMipsI16x8Ne)                             \
  V(I16x8GtS, kMipsI16x8GtS)                           \
  V(I16x8GeS, kMipsI16x8GeS)                           \
  V(I16x8GtU, kMipsI16x8GtU)                           \
  V(I16x8GeU, kMipsI16x8GeU)                           \
  V(I16x8SConvertI32x4, kMipsI16x8SConvertI32x4)       \
  V(I16x8UConvertI32x4, kMipsI16x8UConvertI32x4)       \
  V(I16x8Q15MulRSatS, kMipsI16x8Q15MulRSatS)           \
  V(I16x8ExtMulLowI8x16S, kMipsI16x8ExtMulLowI8x16S)   \
  V(I16x8ExtMulHighI8x16S, kMipsI16x8ExtMulHighI8x16S) \
  V(I16x8ExtMulLowI8x16U, kMipsI16x8ExtMulLowI8x16U)   \
  V(I16x8ExtMulHighI8x16U, kMipsI16x8ExtMulHighI8x16U) \
  V(I16x8RoundingAverageU, kMipsI16x8RoundingAverageU) \
  V(I16x8Abs, kMipsI16x8Abs)                           \
  V(I8x16Add, kMipsI8x16Add)                           \
  V(I8x16AddSatS, kMipsI8x16AddSatS)                   \
  V(I8x16AddSatU, kMipsI8x16AddSatU)                   \
  V(I8x16Sub, kMipsI8x16Sub)                           \
  V(I8x16SubSatS, kMipsI8x16SubSatS)                   \
  V(I8x16SubSatU, kMipsI8x16SubSatU)                   \
  V(I8x16MaxS, kMipsI8x16MaxS)                         \
  V(I8x16MinS, kMipsI8x16MinS)                         \
  V(I8x16MaxU, kMipsI8x16MaxU)                         \
  V(I8x16MinU, kMipsI8x16MinU)                         \
  V(I8x16Eq, kMipsI8x16Eq)                             \
  V(I8x16Ne, kMipsI8x16Ne)                             \
  V(I8x16GtS, kMipsI8x16GtS)                           \
  V(I8x16GeS, kMipsI8x16GeS)                           \
  V(I8x16GtU, kMipsI8x16GtU)                           \
  V(I8x16GeU, kMipsI8x16GeU)                           \
  V(I8x16RoundingAverageU, kMipsI8x16RoundingAverageU) \
  V(I8x16SConvertI16x8, kMipsI8x16SConvertI16x8)       \
  V(I8x16UConvertI16x8, kMipsI8x16UConvertI16x8)       \
  V(I8x16Abs, kMipsI8x16Abs)                           \
  V(S128And, kMipsS128And)                             \
  V(S128Or, kMipsS128Or)                               \
  V(S128Xor, kMipsS128Xor)                             \
  V(S128AndNot, kMipsS128AndNot)
2304 
// Materializing an arbitrary 128-bit constant is not implemented on MIPS32.
void InstructionSelector::VisitS128Const(Node* node) { UNIMPLEMENTED(); }
2306 
// Lowers the all-zeros SIMD constant to kMipsS128Zero. The node has no
// inputs; NOTE(review): DefineSameAsFirst with no input operand looks odd —
// presumably the code generator only needs a destination register here.
void InstructionSelector::VisitS128Zero(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
}
2311 
// Generates Visit<Type>Splat for every SIMD lane shape (plus F64x2):
// broadcast a scalar into all lanes via the matching kMips<Type>Splat.
#define SIMD_VISIT_SPLAT(Type)                               \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    VisitRR(this, kMips##Type##Splat, node);                 \
  }
SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
SIMD_VISIT_SPLAT(F64x2)
#undef SIMD_VISIT_SPLAT
2319 
// Generates Visit<Type>ExtractLane<Sign>: read one lane (lane index is the
// immediate operand). Integer narrow types come in S/U variants; float and
// 32-bit lanes need no sign distinction (empty Sign argument).
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign)                              \
  void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
    VisitRRI(this, kMips##Type##ExtractLane##Sign, node);                \
  }
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE
2332 
// Generates Visit<Type>ReplaceLane: overwrite one lane with a scalar
// (vector, lane immediate, replacement register — hence VisitRRIR).
#define SIMD_VISIT_REPLACE_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    VisitRRIR(this, kMips##Type##ReplaceLane, node);               \
  }
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
SIMD_VISIT_REPLACE_LANE(F64x2)
#undef SIMD_VISIT_REPLACE_LANE
2340 
// Stamps out one VisitRR lowering per entry of SIMD_UNOP_LIST.
#define SIMD_VISIT_UNOP(Name, instruction)            \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, instruction, node);                 \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
2347 
// Stamps out one VisitRRI lowering per entry of SIMD_SHIFT_OP_LIST.
#define SIMD_VISIT_SHIFT_OP(Name)                     \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRI(this, kMips##Name, node);                \
  }
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
2354 
// Stamps out one VisitRRR lowering per entry of SIMD_BINOP_LIST.
#define SIMD_VISIT_BINOP(Name, instruction)           \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRR(this, instruction, node);                \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
2361 
// Bitwise select: three vector inputs (mask, true-value, false-value),
// lowered as a four-register instruction.
void InstructionSelector::VisitS128Select(Node* node) {
  VisitRRRR(this, kMipsS128Select, node);
}
2365 
#if V8_ENABLE_WEBASSEMBLY
namespace {

// Pairs a canonical 16-byte shuffle pattern with the single MIPS instruction
// that implements it.
struct ShuffleEntry {
  uint8_t shuffle[kSimd128Size];
  ArchOpcode opcode;
};

// Shuffle patterns that have a single-instruction lowering. Byte indices
// 0-15 select bytes of the first operand, 16-31 bytes of the second.
// Grouped by lane width: 32x4, then 16x8, then 8x16 patterns.
static const ShuffleEntry arch_shuffles[] = {
    {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
     kMipsS32x4InterleaveRight},
    {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
     kMipsS32x4InterleaveLeft},
    {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
     kMipsS32x4PackEven},
    {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
     kMipsS32x4PackOdd},
    {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
     kMipsS32x4InterleaveEven},
    {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
     kMipsS32x4InterleaveOdd},

    {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
     kMipsS16x8InterleaveRight},
    {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
     kMipsS16x8InterleaveLeft},
    {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
     kMipsS16x8PackEven},
    {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
     kMipsS16x8PackOdd},
    {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
     kMipsS16x8InterleaveEven},
    {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
     kMipsS16x8InterleaveOdd},
    {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kMipsS16x4Reverse},
    {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, kMipsS16x2Reverse},

    {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
     kMipsS8x16InterleaveRight},
    {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
     kMipsS8x16InterleaveLeft},
    {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
     kMipsS8x16PackEven},
    {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
     kMipsS8x16PackOdd},
    {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
     kMipsS8x16InterleaveEven},
    {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
     kMipsS8x16InterleaveOdd},
    {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kMipsS8x8Reverse},
    {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kMipsS8x4Reverse},
    {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kMipsS8x2Reverse}};
TryMatchArchShuffle(const uint8_t * shuffle,const ShuffleEntry * table,size_t num_entries,bool is_swizzle,ArchOpcode * opcode)2419 bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
2420                          size_t num_entries, bool is_swizzle,
2421                          ArchOpcode* opcode) {
2422   uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
2423   for (size_t i = 0; i < num_entries; ++i) {
2424     const ShuffleEntry& entry = table[i];
2425     int j = 0;
2426     for (; j < kSimd128Size; ++j) {
2427       if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
2428         break;
2429       }
2430     }
2431     if (j == kSimd128Size) {
2432       *opcode = entry.opcode;
2433       return true;
2434     }
2435   }
2436   return false;
2437 }
2438 
2439 }  // namespace
2440 
// Lowers a general 16-byte shuffle, trying progressively more generic
// strategies: a dedicated single instruction, then a byte-concat window,
// then a 32-bit-lane shuffle, and finally the fully general byte shuffle
// with the pattern packed into four immediates.
void InstructionSelector::VisitI8x16Shuffle(Node* node) {
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  CanonicalizeShuffle(node, shuffle, &is_swizzle);
  uint8_t shuffle32x4[4];
  ArchOpcode opcode;
  // Best case: the pattern matches one of the dedicated MIPS shuffles.
  if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
                          is_swizzle, &opcode)) {
    VisitRRR(this, opcode, node);
    return;
  }
  Node* input0 = node->InputAt(0);
  Node* input1 = node->InputAt(1);
  uint8_t offset;
  MipsOperandGenerator g(this);
  // A contiguous window of the concatenated inputs lowers to one concat
  // instruction. Note the operand order: input1 is passed before input0.
  if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
    Emit(kMipsS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
         g.UseRegister(input0), g.UseImmediate(offset));
    return;
  }
  // A shuffle that only moves whole 32-bit lanes fits in one immediate.
  if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
    Emit(kMipsS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1),
         g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
    return;
  }
  // General case: the full 16-byte pattern, packed four bytes per immediate.
  Emit(kMipsI8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
       g.UseRegister(input1),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
}
#else
// Without WebAssembly support no SIMD shuffle nodes are created.
void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
#endif  // V8_ENABLE_WEBASSEMBLY
2477 
// Lowers a byte swizzle (runtime lane indices), with one temporary SIMD
// register for the code generator.
void InstructionSelector::VisitI8x16Swizzle(Node* node) {
  MipsOperandGenerator g(this);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  // Neither input may share a register with the output: the output register
  // is modified before the calculation has finished reading the inputs.
  Emit(kMipsI8x16Swizzle, g.DefineAsRegister(node),
       g.UseUniqueRegister(node->InputAt(0)),
       g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
2487 
// Sign-extend the low byte to 32 bits via the SEB instruction.
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsSeb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
2492 
// Sign-extend the low halfword to 32 bits via the SEH instruction.
void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsSeh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
2497 
// Wasm pseudo-minimum on float lanes; uses unique registers for operands
// (presumably so the lowering may clobber them — see VisitUniqueRRR).
void InstructionSelector::VisitF32x4Pmin(Node* node) {
  VisitUniqueRRR(this, kMipsF32x4Pmin, node);
}
2501 
// Wasm pseudo-maximum on float lanes; unique registers as for Pmin.
void InstructionSelector::VisitF32x4Pmax(Node* node) {
  VisitUniqueRRR(this, kMipsF32x4Pmax, node);
}
2505 
// Wasm pseudo-minimum on double lanes; unique registers as for F32x4Pmin.
void InstructionSelector::VisitF64x2Pmin(Node* node) {
  VisitUniqueRRR(this, kMipsF64x2Pmin, node);
}
2509 
// Wasm pseudo-maximum on double lanes; unique registers as for F32x4Pmin.
void InstructionSelector::VisitF64x2Pmax(Node* node) {
  VisitUniqueRRR(this, kMipsF64x2Pmax, node);
}
2513 
// Select continuations are not used on MIPS32, so this hook must never be
// called.
void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
                                                        int first_input_index,
                                                        Node* node) {
  UNREACHABLE();
}
2519 
2520 // static
2521 MachineOperatorBuilder::Flags
SupportedMachineOperatorFlags()2522 InstructionSelector::SupportedMachineOperatorFlags() {
2523   MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
2524   if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2525       IsFp64Mode()) {
2526     flags |= MachineOperatorBuilder::kFloat64RoundDown |
2527              MachineOperatorBuilder::kFloat64RoundUp |
2528              MachineOperatorBuilder::kFloat64RoundTruncate |
2529              MachineOperatorBuilder::kFloat64RoundTiesEven;
2530   }
2531 
2532   return flags | MachineOperatorBuilder::kWord32Ctz |
2533          MachineOperatorBuilder::kWord32Popcnt |
2534          MachineOperatorBuilder::kInt32DivIsSafe |
2535          MachineOperatorBuilder::kUint32DivIsSafe |
2536          MachineOperatorBuilder::kWord32ShiftIsSafe |
2537          MachineOperatorBuilder::kFloat32RoundDown |
2538          MachineOperatorBuilder::kFloat32RoundUp |
2539          MachineOperatorBuilder::kFloat32RoundTruncate |
2540          MachineOperatorBuilder::kFloat32RoundTiesEven;
2541 }
2542 
2543 // static
2544 MachineOperatorBuilder::AlignmentRequirements
AlignmentRequirements()2545 InstructionSelector::AlignmentRequirements() {
2546   if (IsMipsArchVariant(kMips32r6)) {
2547     return MachineOperatorBuilder::AlignmentRequirements::
2548         FullUnalignedAccessSupport();
2549   } else {
2550     DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2551            IsMipsArchVariant(kMips32r2));
2552     return MachineOperatorBuilder::AlignmentRequirements::
2553         NoUnalignedAccessSupport();
2554   }
2555 }
2556 
2557 #undef SIMD_BINOP_LIST
2558 #undef SIMD_SHIFT_OP_LIST
2559 #undef SIMD_UNOP_LIST
2560 #undef SIMD_TYPE_LIST
2561 #undef TRACE_UNIMPL
2562 #undef TRACE
2563 
2564 }  // namespace compiler
2565 }  // namespace internal
2566 }  // namespace v8
2567