1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/codegen/assembler-inl.h"
6 #include "src/codegen/callable.h"
7 #include "src/codegen/macro-assembler.h"
8 #include "src/codegen/optimized-compilation-info.h"
9 #include "src/compiler/backend/code-generator-impl.h"
10 #include "src/compiler/backend/code-generator.h"
11 #include "src/compiler/backend/gap-resolver.h"
12 #include "src/compiler/node-matchers.h"
13 #include "src/compiler/osr.h"
14 #include "src/heap/memory-chunk.h"
15 
16 #if V8_ENABLE_WEBASSEMBLY
17 #include "src/wasm/wasm-code-manager.h"
18 #endif  // V8_ENABLE_WEBASSEMBLY
19 
20 namespace v8 {
21 namespace internal {
22 namespace compiler {
23 
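// Shorthand used throughout this file: "__ foo()" expands to "tasm()->foo()".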
24 #define __ tasm()->
25 
26 // TODO(plind): consider renaming these macros.
27 #define TRACE_MSG(msg)                                                      \
28   PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
29          __LINE__)
30 
31 #define TRACE_UNIMPL()                                                       \
32   PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
33          __LINE__)
34 
35 // Adds Mips-specific methods to convert InstructionOperands.
36 class MipsOperandConverter final : public InstructionOperandConverter {
37  public:
38   MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
39       : InstructionOperandConverter(gen, instr) {}
40 
41   FloatRegister OutputSingleRegister(size_t index = 0) {
42     return ToSingleRegister(instr_->OutputAt(index));
43   }
44 
45   FloatRegister InputSingleRegister(size_t index) {
46     return ToSingleRegister(instr_->InputAt(index));
47   }
48 
49   FloatRegister ToSingleRegister(InstructionOperand* op) {
50     // Single (Float) and Double register namespace is same on MIPS,
51     // both are typedefs of FPURegister.
52     return ToDoubleRegister(op);
53   }
54 
55   Register InputOrZeroRegister(size_t index) {
56     if (instr_->InputAt(index)->IsImmediate()) {
57       DCHECK_EQ(0, InputInt32(index));
58       return zero_reg;
59     }
60     return InputRegister(index);
61   }
62 
63   DoubleRegister InputOrZeroDoubleRegister(size_t index) {
64     if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
65 
66     return InputDoubleRegister(index);
67   }
68 
69   DoubleRegister InputOrZeroSingleRegister(size_t index) {
70     if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
71 
72     return InputSingleRegister(index);
73   }
74 
75   Operand InputImmediate(size_t index) {
76     Constant constant = ToConstant(instr_->InputAt(index));
77     switch (constant.type()) {
78       case Constant::kInt32:
79         return Operand(constant.ToInt32());
80       case Constant::kFloat32:
81         return Operand::EmbeddedNumber(constant.ToFloat32());
82       case Constant::kFloat64:
83         return Operand::EmbeddedNumber(constant.ToFloat64().value());
84       case Constant::kInt64:
85       case Constant::kExternalReference:
86       case Constant::kCompressedHeapObject:
87       case Constant::kHeapObject:
88         // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
89         //    maybe not done on arm due to const pool ??
90         break;
91       case Constant::kDelayedStringConstant:
92         return Operand::EmbeddedStringConstant(
93             constant.ToDelayedStringConstant());
94       case Constant::kRpoNumber:
95         UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
96     }
97     UNREACHABLE();
98   }
99 
100   Operand InputOperand(size_t index) {
101     InstructionOperand* op = instr_->InputAt(index);
102     if (op->IsRegister()) {
103       return Operand(ToRegister(op));
104     }
105     return InputImmediate(index);
106   }
107 
108   MemOperand MemoryOperand(size_t* first_index) {
109     const size_t index = *first_index;
110     switch (AddressingModeField::decode(instr_->opcode())) {
111       case kMode_None:
112         break;
113       case kMode_MRI:
114         *first_index += 2;
115         return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
116       case kMode_MRR:
117         // TODO(plind): r6 address mode, to be implemented ...
118         UNREACHABLE();
119     }
120     UNREACHABLE();
121   }
122 
123   MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
124 
125   MemOperand ToMemOperand(InstructionOperand* op) const {
126     DCHECK_NOT_NULL(op);
127     DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
128     return SlotToMemOperand(AllocatedOperand::cast(op)->index());
129   }
130 
131   MemOperand SlotToMemOperand(int slot) const {
132     FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
133     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
134   }
135 };
136 
137 static inline bool HasRegisterInput(Instruction* instr, size_t index) {
138   return instr->InputAt(index)->IsRegister();
139 }
140 
141 namespace {
142 
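// Out-of-line slow path for write barriers. The inline fast path at the use
// site filters out uninteresting stores; this code computes the slot address,
// saves ra when no frame was built, and calls the RecordWrite stub or the
// ephemeron key barrier.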
143 class OutOfLineRecordWrite final : public OutOfLineCode {
144  public:
145   OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
146                        Register value, Register scratch0, Register scratch1,
147                        RecordWriteMode mode, StubCallMode stub_mode)
148       : OutOfLineCode(gen),
149         object_(object),
150         index_(index),
151         value_(value),
152         scratch0_(scratch0),
153         scratch1_(scratch1),
154         mode_(mode),
155 #if V8_ENABLE_WEBASSEMBLY
156         stub_mode_(stub_mode),
157 #endif  // V8_ENABLE_WEBASSEMBLY
158         must_save_lr_(!gen->frame_access_state()->has_frame()),
159         zone_(gen->zone()) {
160     DCHECK(!AreAliased(object, index, scratch0, scratch1));
161     DCHECK(!AreAliased(value, index, scratch0, scratch1));
162   }
163 
164   void Generate() final {
165     __ CheckPageFlag(value_, scratch0_,
166                      MemoryChunk::kPointersToHereAreInterestingMask, eq,
167                      exit());
168     __ Addu(scratch1_, object_, index_);
169     RememberedSetAction const remembered_set_action =
170         mode_ > RecordWriteMode::kValueIsMap ||
171                 FLAG_use_full_record_write_builtin
172             ? RememberedSetAction::kEmit
173             : RememberedSetAction::kOmit;
174     SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
175                                             ? SaveFPRegsMode::kSave
176                                             : SaveFPRegsMode::kIgnore;
177     if (must_save_lr_) {
178       // We need to save and restore ra if the frame was elided.
179       __ Push(ra);
180     }
181 
182     if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
183       __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
184 #if V8_ENABLE_WEBASSEMBLY
185     } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
186       // A direct call to a wasm runtime stub defined in this module.
187       // Just encode the stub index. This will be patched when the code
188       // is added to the native module and copied into wasm code space.
189       __ CallRecordWriteStubSaveRegisters(object_, scratch1_,
190                                           remembered_set_action, save_fp_mode,
191                                           StubCallMode::kCallWasmRuntimeStub);
192 #endif  // V8_ENABLE_WEBASSEMBLY
193     } else {
194       __ CallRecordWriteStubSaveRegisters(object_, scratch1_,
195                                           remembered_set_action, save_fp_mode);
196     }
197     if (must_save_lr_) {
198       __ Pop(ra);
199     }
200   }
201 
202  private:
203   Register const object_;
204   Register const index_;
205   Register const value_;
206   Register const scratch0_;
207   Register const scratch1_;
208   RecordWriteMode const mode_;
209 #if V8_ENABLE_WEBASSEMBLY
210   StubCallMode const stub_mode_;
211 #endif  // V8_ENABLE_WEBASSEMBLY
212   bool must_save_lr_;
213   Zone* zone_;
214 };
215 
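// Defines an out-of-line class whose Generate() forwards dst/src1/src2 to the
// named TurboAssembler helper; used below for the Float32/Float64 Max/Min
// slow paths.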
216 #define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T)                 \
217   class ool_name final : public OutOfLineCode {                      \
218    public:                                                           \
219     ool_name(CodeGenerator* gen, T dst, T src1, T src2)              \
220         : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
221                                                                      \
222     void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); }  \
223                                                                      \
224    private:                                                          \
225     T const dst_;                                                    \
226     T const src1_;                                                   \
227     T const src2_;                                                   \
228   }
229 
230 CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
231 CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
232 CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, DoubleRegister);
233 CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, DoubleRegister);
234 
235 #undef CREATE_OOL_CLASS
236 
237 Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
238   switch (condition) {
239     case kEqual:
240       return eq;
241     case kNotEqual:
242       return ne;
243     case kSignedLessThan:
244       return lt;
245     case kSignedGreaterThanOrEqual:
246       return ge;
247     case kSignedLessThanOrEqual:
248       return le;
249     case kSignedGreaterThan:
250       return gt;
251     case kUnsignedLessThan:
252       return lo;
253     case kUnsignedGreaterThanOrEqual:
254       return hs;
255     case kUnsignedLessThanOrEqual:
256       return ls;
257     case kUnsignedGreaterThan:
258       return hi;
259     case kUnorderedEqual:
260     case kUnorderedNotEqual:
261       break;
262     default:
263       break;
264   }
265   UNREACHABLE();
266 }
267 
268 Condition FlagsConditionToConditionTst(FlagsCondition condition) {
269   switch (condition) {
270     case kNotEqual:
271       return ne;
272     case kEqual:
273       return eq;
274     default:
275       break;
276   }
277   UNREACHABLE();
278 }
279 
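// Maps a FlagsCondition to a MIPS FPU compare condition. Conditions without a
// direct FPU encoding (e.g. kNotEqual, kUnsignedGreaterThanOrEqual) are
// expressed as the negation of EQ/OLT/OLE, with *predicate set to false so
// the caller branches on the cleared condition flag.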
280 FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
281                                              FlagsCondition condition) {
282   switch (condition) {
283     case kEqual:
284       *predicate = true;
285       return EQ;
286     case kNotEqual:
287       *predicate = false;
288       return EQ;
289     case kUnsignedLessThan:
290       *predicate = true;
291       return OLT;
292     case kUnsignedGreaterThanOrEqual:
293       *predicate = false;
294       return OLT;
295     case kUnsignedLessThanOrEqual:
296       *predicate = true;
297       return OLE;
298     case kUnsignedGreaterThan:
299       *predicate = false;
300       return OLE;
301     case kUnorderedEqual:
302     case kUnorderedNotEqual:
303       *predicate = true;
304       break;
305     default:
306       *predicate = true;
307       break;
308   }
309   UNREACHABLE();
310 }
311 
312 #define UNSUPPORTED_COND(opcode, condition)                                    \
313   StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
314                  << "\"";                                                      \
315   UNIMPLEMENTED();
316 
317 }  // namespace
318 
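// The atomic macros below bracket the access with sync barriers and, for
// read-modify-write operations, retry a load-linked/store-conditional (Ll/Sc)
// loop until the store-conditional succeeds.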
319 #define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)          \
320   do {                                                   \
321     __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
322     __ sync();                                           \
323   } while (0)
324 
325 #define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)               \
326   do {                                                         \
327     __ sync();                                                 \
328     __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
329     __ sync();                                                 \
330   } while (0)
331 
332 #define ASSEMBLE_ATOMIC_BINOP(bin_instr)                                \
333   do {                                                                  \
334     Label binop;                                                        \
335     __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
336     __ sync();                                                          \
337     __ bind(&binop);                                                    \
338     __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));       \
339     __ bin_instr(i.TempRegister(1), i.OutputRegister(0),                \
340                  Operand(i.InputRegister(2)));                          \
341     __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));         \
342     __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));   \
343     __ sync();                                                          \
344   } while (0)
345 
346 #define ASSEMBLE_ATOMIC64_LOGIC_BINOP(bin_instr, external)                     \
347   do {                                                                         \
348     if (IsMipsArchVariant(kMips32r6)) {                                        \
349       Label binop;                                                             \
350       Register oldval_low =                                                    \
351           instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1); \
352       Register oldval_high =                                                   \
353           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2); \
354       __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));      \
355       __ sync();                                                               \
356       __ bind(&binop);                                                         \
357       __ llx(oldval_high, MemOperand(i.TempRegister(0), 4));                   \
358       __ ll(oldval_low, MemOperand(i.TempRegister(0), 0));                     \
359       __ bin_instr(i.TempRegister(1), i.TempRegister(2), oldval_low,           \
360                    oldval_high, i.InputRegister(2), i.InputRegister(3));       \
361       __ scx(i.TempRegister(2), MemOperand(i.TempRegister(0), 4));             \
362       __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));              \
363       __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));        \
364       __ sync();                                                               \
365     } else {                                                                   \
366       FrameScope scope(tasm(), StackFrame::MANUAL);                            \
367       __ Addu(a0, i.InputRegister(0), i.InputRegister(1));                     \
368       __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);                     \
369       __ PrepareCallCFunction(3, 0, kScratchReg);                              \
370       __ CallCFunction(ExternalReference::external(), 3, 0);                   \
371       __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);                      \
372     }                                                                          \
373   } while (0)
374 
375 #define ASSEMBLE_ATOMIC64_ARITH_BINOP(bin_instr, external)                     \
376   do {                                                                         \
377     if (IsMipsArchVariant(kMips32r6)) {                                        \
378       Label binop;                                                             \
379       Register oldval_low =                                                    \
380           instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1); \
381       Register oldval_high =                                                   \
382           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2); \
383       __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));      \
384       __ sync();                                                               \
385       __ bind(&binop);                                                         \
386       __ llx(oldval_high, MemOperand(i.TempRegister(0), 4));                   \
387       __ ll(oldval_low, MemOperand(i.TempRegister(0), 0));                     \
388       __ bin_instr(i.TempRegister(1), i.TempRegister(2), oldval_low,           \
389                    oldval_high, i.InputRegister(2), i.InputRegister(3),        \
390                    kScratchReg, kScratchReg2);                                 \
391       __ scx(i.TempRegister(2), MemOperand(i.TempRegister(0), 4));             \
392       __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));              \
393       __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));        \
394       __ sync();                                                               \
395     } else {                                                                   \
396       FrameScope scope(tasm(), StackFrame::MANUAL);                            \
397       __ Addu(a0, i.InputRegister(0), i.InputRegister(1));                     \
398       __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);                     \
399       __ PrepareCallCFunction(3, 0, kScratchReg);                              \
400       __ CallCFunction(ExternalReference::external(), 3, 0);                   \
401       __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);                      \
402     }                                                                          \
403   } while (0)
404 
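// Sub-word (8/16-bit) atomics: align the address down to its containing word,
// convert the byte offset into a bit shift, and use ExtractBits / InsertBits
// inside the Ll/Sc loop so only the addressed lane is modified.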
405 #define ASSEMBLE_ATOMIC_BINOP_EXT(sign_extend, size, bin_instr)                \
406   do {                                                                         \
407     Label binop;                                                               \
408     __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));        \
409     __ andi(i.TempRegister(3), i.TempRegister(0), 0x3);                        \
410     __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(3))); \
411     __ sll(i.TempRegister(3), i.TempRegister(3), 3);                           \
412     __ sync();                                                                 \
413     __ bind(&binop);                                                           \
414     __ Ll(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));                \
415     __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3),  \
416                    size, sign_extend);                                         \
417     __ bin_instr(i.TempRegister(2), i.OutputRegister(0),                       \
418                  Operand(i.InputRegister(2)));                                 \
419     __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3),     \
420                   size);                                                       \
421     __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));                \
422     __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));          \
423     __ sync();                                                                 \
424   } while (0)
425 
426 #define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER()                               \
427   do {                                                                   \
428     Label exchange;                                                      \
429     __ sync();                                                           \
430     __ bind(&exchange);                                                  \
431     __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));  \
432     __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));        \
433     __ mov(i.TempRegister(1), i.InputRegister(2));                       \
434     __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));          \
435     __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \
436     __ sync();                                                           \
437   } while (0)
438 
439 #define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(sign_extend, size)                \
440   do {                                                                         \
441     Label exchange;                                                            \
442     __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));        \
443     __ andi(i.TempRegister(1), i.TempRegister(0), 0x3);                        \
444     __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(1))); \
445     __ sll(i.TempRegister(1), i.TempRegister(1), 3);                           \
446     __ sync();                                                                 \
447     __ bind(&exchange);                                                        \
448     __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));                \
449     __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1),  \
450                    size, sign_extend);                                         \
451     __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1),    \
452                   size);                                                       \
453     __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));                \
454     __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg));       \
455     __ sync();                                                                 \
456   } while (0)
457 
458 #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER()                      \
459   do {                                                                  \
460     Label compareExchange;                                              \
461     Label exit;                                                         \
462     __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
463     __ sync();                                                          \
464     __ bind(&compareExchange);                                          \
465     __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));       \
466     __ BranchShort(&exit, ne, i.InputRegister(2),                       \
467                    Operand(i.OutputRegister(0)));                       \
468     __ mov(i.TempRegister(2), i.InputRegister(3));                      \
469     __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));         \
470     __ BranchShort(&compareExchange, eq, i.TempRegister(2),             \
471                    Operand(zero_reg));                                  \
472     __ bind(&exit);                                                     \
473     __ sync();                                                          \
474   } while (0)
475 
476 #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(sign_extend, size)        \
477   do {                                                                         \
478     Label compareExchange;                                                     \
479     Label exit;                                                                \
480     __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));        \
481     __ andi(i.TempRegister(1), i.TempRegister(0), 0x3);                        \
482     __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(1))); \
483     __ sll(i.TempRegister(1), i.TempRegister(1), 3);                           \
484     __ sync();                                                                 \
485     __ bind(&compareExchange);                                                 \
486     __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));                \
487     __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1),  \
488                    size, sign_extend);                                         \
489     __ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size,     \
490                    sign_extend);                                               \
491     __ BranchShort(&exit, ne, i.InputRegister(2),                              \
492                    Operand(i.OutputRegister(0)));                              \
493     __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1),    \
494                   size);                                                       \
495     __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));                \
496     __ BranchShort(&compareExchange, eq, i.TempRegister(2),                    \
497                    Operand(zero_reg));                                         \
498     __ bind(&exit);                                                            \
499     __ sync();                                                                 \
500   } while (0)
501 
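// The IEEE754 helpers call the corresponding ieee754 C functions through
// CallCFunction, passing the operands in FP parameter registers and reading
// the result back with MovFromFloatResult.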
502 #define ASSEMBLE_IEEE754_BINOP(name)                                        \
503   do {                                                                      \
504     FrameScope scope(tasm(), StackFrame::MANUAL);                           \
505     __ PrepareCallCFunction(0, 2, kScratchReg);                             \
506     __ MovToFloatParameters(i.InputDoubleRegister(0),                       \
507                             i.InputDoubleRegister(1));                      \
508     __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
509     /* Move the result in the double result register. */                    \
510     __ MovFromFloatResult(i.OutputDoubleRegister());                        \
511   } while (0)
512 
513 #define ASSEMBLE_IEEE754_UNOP(name)                                         \
514   do {                                                                      \
515     FrameScope scope(tasm(), StackFrame::MANUAL);                           \
516     __ PrepareCallCFunction(0, 1, kScratchReg);                             \
517     __ MovToFloatParameter(i.InputDoubleRegister(0));                       \
518     __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
519     /* Move the result in the double result register. */                    \
520     __ MovFromFloatResult(i.OutputDoubleRegister());                        \
521   } while (0)
522 
523 #define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op)                     \
524   do {                                                          \
525     __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
526           i.InputSimd128Register(1));                           \
527   } while (0)
528 
529 #define ASSEMBLE_SIMD_EXTENDED_MULTIPLY(op0, op1)                           \
530   do {                                                                      \
531     CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);                           \
532     __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);            \
533     __ op0(kSimd128ScratchReg, kSimd128RegZero, i.InputSimd128Register(0)); \
534     __ op0(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(1));    \
535     __ op1(i.OutputSimd128Register(), kSimd128ScratchReg, kSimd128RegZero); \
536   } while (0)
537 
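// Frame teardown: restore sp from fp, then pop the saved ra and caller fp.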
538 void CodeGenerator::AssembleDeconstructFrame() {
539   __ mov(sp, fp);
540   __ Pop(ra, fp);
541 }
542 
543 void CodeGenerator::AssemblePrepareTailCall() {
544   if (frame_access_state()->has_frame()) {
545     __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
546     __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
547   }
548   frame_access_state()->SetFrameAccessToSP();
549 }
550 namespace {
551 
552 void AdjustStackPointerForTailCall(TurboAssembler* tasm,
553                                    FrameAccessState* state,
554                                    int new_slot_above_sp,
555                                    bool allow_shrinkage = true) {
556   int current_sp_offset = state->GetSPToFPSlotCount() +
557                           StandardFrameConstants::kFixedSlotCountAboveFp;
558   int stack_slot_delta = new_slot_above_sp - current_sp_offset;
559   if (stack_slot_delta > 0) {
560     tasm->Subu(sp, sp, stack_slot_delta * kSystemPointerSize);
561     state->IncreaseSPDelta(stack_slot_delta);
562   } else if (allow_shrinkage && stack_slot_delta < 0) {
563     tasm->Addu(sp, sp, -stack_slot_delta * kSystemPointerSize);
564     state->IncreaseSPDelta(stack_slot_delta);
565   }
566 }
567 
568 }  // namespace
569 
570 void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
571                                               int first_unused_slot_offset) {
572   AdjustStackPointerForTailCall(tasm(), frame_access_state(),
573                                 first_unused_slot_offset, false);
574 }
575 
576 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
577                                              int first_unused_slot_offset) {
578   AdjustStackPointerForTailCall(tasm(), frame_access_state(),
579                                 first_unused_slot_offset);
580 }
581 
582 // Check that {kJavaScriptCallCodeStartRegister} is correct.
583 void CodeGenerator::AssembleCodeStartRegisterCheck() {
584   __ ComputeCodeStartAddress(kScratchReg);
585   __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
586             kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
587 }
588 
589 // Check if the code object is marked for deoptimization. If it is, then it
590 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
591 // to:
592 //    1. read from memory the word that contains that bit, which can be found in
593 //       the flags in the referenced {CodeDataContainer} object;
594 //    2. test kMarkedForDeoptimizationBit in those flags; and
595 //    3. if it is not zero then it jumps to the builtin.
596 void CodeGenerator::BailoutIfDeoptimized() {
597   int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
598   __ lw(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
599   __ lw(kScratchReg,
600         FieldMemOperand(kScratchReg,
601                         CodeDataContainer::kKindSpecificFlagsOffset));
602   __ And(kScratchReg, kScratchReg,
603          Operand(1 << Code::kMarkedForDeoptimizationBit));
604   __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
605           RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
606 }
607 
608 // Assembles an instruction after register allocation, producing machine code.
609 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
610     Instruction* instr) {
611   MipsOperandConverter i(this, instr);
612   InstructionCode opcode = instr->opcode();
613   ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
614   switch (arch_opcode) {
615     case kArchCallCodeObject: {
616       if (instr->InputAt(0)->IsImmediate()) {
617         __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
618       } else {
619         Register reg = i.InputRegister(0);
620         DCHECK_IMPLIES(
621             instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
622             reg == kJavaScriptCallCodeStartRegister);
623         __ Call(reg, reg, Code::kHeaderSize - kHeapObjectTag);
624       }
625       RecordCallPosition(instr);
626       frame_access_state()->ClearSPDelta();
627       break;
628     }
629     case kArchCallBuiltinPointer: {
630       DCHECK(!instr->InputAt(0)->IsImmediate());
631       Register builtin_index = i.InputRegister(0);
632       __ CallBuiltinByIndex(builtin_index);
633       RecordCallPosition(instr);
634       frame_access_state()->ClearSPDelta();
635       break;
636     }
637 #if V8_ENABLE_WEBASSEMBLY
638     case kArchCallWasmFunction: {
639       if (instr->InputAt(0)->IsImmediate()) {
640         Constant constant = i.ToConstant(instr->InputAt(0));
641         Address wasm_code = static_cast<Address>(constant.ToInt32());
642         __ Call(wasm_code, constant.rmode());
643       } else {
644         __ Call(i.InputRegister(0));
645       }
646       RecordCallPosition(instr);
647       frame_access_state()->ClearSPDelta();
648       break;
649     }
650     case kArchTailCallWasm: {
651       if (instr->InputAt(0)->IsImmediate()) {
652         Constant constant = i.ToConstant(instr->InputAt(0));
653         Address wasm_code = static_cast<Address>(constant.ToInt32());
654         __ Jump(wasm_code, constant.rmode());
655       } else {
656         __ Jump(i.InputRegister(0));
657       }
658       frame_access_state()->ClearSPDelta();
659       frame_access_state()->SetFrameAccessToDefault();
660       break;
661     }
662 #endif  // V8_ENABLE_WEBASSEMBLY
663     case kArchTailCallCodeObject: {
664       if (instr->InputAt(0)->IsImmediate()) {
665         __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
666       } else {
667         Register reg = i.InputRegister(0);
668         DCHECK_IMPLIES(
669             instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
670             reg == kJavaScriptCallCodeStartRegister);
671         __ Addu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
672         __ Jump(reg);
673       }
674       frame_access_state()->ClearSPDelta();
675       frame_access_state()->SetFrameAccessToDefault();
676       break;
677     }
678     case kArchTailCallAddress: {
679       CHECK(!instr->InputAt(0)->IsImmediate());
680       Register reg = i.InputRegister(0);
681       DCHECK_IMPLIES(
682           instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
683           reg == kJavaScriptCallCodeStartRegister);
684       __ Jump(reg);
685       frame_access_state()->ClearSPDelta();
686       frame_access_state()->SetFrameAccessToDefault();
687       break;
688     }
689     case kArchCallJSFunction: {
690       Register func = i.InputRegister(0);
691       if (FLAG_debug_code) {
692         // Check the function's context matches the context argument.
693         __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
694         __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
695                   Operand(kScratchReg));
696       }
697       static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
698       __ lw(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
699       __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
700       __ Call(a2);
701       RecordCallPosition(instr);
702       frame_access_state()->ClearSPDelta();
703       frame_access_state()->SetFrameAccessToDefault();
704       break;
705     }
706     case kArchPrepareCallCFunction: {
707       int const num_parameters = MiscField::decode(instr->opcode());
708       __ PrepareCallCFunction(num_parameters, kScratchReg);
709       // Frame alignment requires using FP-relative frame addressing.
710       frame_access_state()->SetFrameAccessToFP();
711       break;
712     }
713     case kArchSaveCallerRegisters: {
714       fp_mode_ =
715           static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
716       DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
717              fp_mode_ == SaveFPRegsMode::kSave);
718       // kReturnRegister0 should have been saved before entering the stub.
719       int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
720       DCHECK(IsAligned(bytes, kSystemPointerSize));
721       DCHECK_EQ(0, frame_access_state()->sp_delta());
722       frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
723       DCHECK(!caller_registers_saved_);
724       caller_registers_saved_ = true;
725       break;
726     }
727     case kArchRestoreCallerRegisters: {
728       DCHECK(fp_mode_ ==
729              static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
730       DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
731              fp_mode_ == SaveFPRegsMode::kSave);
732       // Don't overwrite the returned value.
733       int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
734       frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
735       DCHECK_EQ(0, frame_access_state()->sp_delta());
736       DCHECK(caller_registers_saved_);
737       caller_registers_saved_ = false;
738       break;
739     }
740     case kArchPrepareTailCall:
741       AssemblePrepareTailCall();
742       break;
743     case kArchCallCFunction: {
744       int const num_parameters = MiscField::decode(instr->opcode());
745 #if V8_ENABLE_WEBASSEMBLY
746       Label start_call;
747       bool isWasmCapiFunction =
748           linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
749       // Distance in bytes from start_call to the return address.
750       int offset = __ root_array_available() ? 64 : 88;
751 #endif  // V8_ENABLE_WEBASSEMBLY
752 #if V8_HOST_ARCH_MIPS
753       if (FLAG_debug_code) {
754         offset += 16;
755       }
756 #endif
757 
758 #if V8_ENABLE_WEBASSEMBLY
759       if (isWasmCapiFunction) {
760         // Put the return address in a stack slot.
761         __ mov(kScratchReg, ra);
762         __ bind(&start_call);
763         __ nal();
764         __ nop();
765         __ Addu(ra, ra, offset - 8);  // 8 = nop + nal
766         __ sw(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
767         __ mov(ra, kScratchReg);
768       }
769 #endif  // V8_ENABLE_WEBASSEMBLY
770 
771       if (instr->InputAt(0)->IsImmediate()) {
772         ExternalReference ref = i.InputExternalReference(0);
773         __ CallCFunction(ref, num_parameters);
774       } else {
775         Register func = i.InputRegister(0);
776         __ CallCFunction(func, num_parameters);
777       }
778 
779 #if V8_ENABLE_WEBASSEMBLY
780       if (isWasmCapiFunction) {
781         CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
782         RecordSafepoint(instr->reference_map());
783       }
784 #endif  // V8_ENABLE_WEBASSEMBLY
785 
786       frame_access_state()->SetFrameAccessToDefault();
787       // Ideally, we should decrement SP delta to match the change of stack
788       // pointer in CallCFunction. However, for certain architectures (e.g.
789       // ARM), there may be more strict alignment requirement, causing old SP
790       // to be saved on the stack. In those cases, we can not calculate the SP
791       // delta statically.
792       frame_access_state()->ClearSPDelta();
793       if (caller_registers_saved_) {
794         // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
795         // Here, we assume the sequence to be:
796         //   kArchSaveCallerRegisters;
797         //   kArchCallCFunction;
798         //   kArchRestoreCallerRegisters;
799         int bytes =
800             __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
801         frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
802       }
803       break;
804     }
805     case kArchJmp:
806       AssembleArchJump(i.InputRpo(0));
807       break;
808     case kArchBinarySearchSwitch:
809       AssembleArchBinarySearchSwitch(instr);
810       break;
811     case kArchTableSwitch:
812       AssembleArchTableSwitch(instr);
813       break;
814     case kArchAbortCSADcheck:
815       DCHECK(i.InputRegister(0) == a0);
816       {
817         // We don't actually want to generate a pile of code for this, so just
818         // claim there is a stack frame, without generating one.
819         FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
820         __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
821                 RelocInfo::CODE_TARGET);
822       }
823       __ stop();
824       break;
825     case kArchDebugBreak:
826       __ DebugBreak();
827       break;
828     case kArchComment:
829       __ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
830       break;
831     case kArchNop:
832     case kArchThrowTerminator:
833       // don't emit code for nops.
834       break;
835     case kArchDeoptimize: {
836       DeoptimizationExit* exit =
837           BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore());
838       __ Branch(exit->label());
839       break;
840     }
841     case kArchRet:
842       AssembleReturn(instr->InputAt(0));
843       break;
844     case kArchStackPointerGreaterThan: {
845       Register lhs_register = sp;
846       uint32_t offset;
847       if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
848         lhs_register = i.TempRegister(1);
849         __ Subu(lhs_register, sp, offset);
850       }
851       __ Sltu(i.TempRegister(0), i.InputRegister(0), lhs_register);
852       break;
853     }
854     case kArchStackCheckOffset:
855       __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
856       break;
857     case kArchFramePointer:
858       __ mov(i.OutputRegister(), fp);
859       break;
860     case kArchParentFramePointer:
861       if (frame_access_state()->has_frame()) {
862         __ lw(i.OutputRegister(), MemOperand(fp, 0));
863       } else {
864         __ mov(i.OutputRegister(), fp);
865       }
866       break;
867     case kArchTruncateDoubleToI:
868       __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
869                            i.InputDoubleRegister(0), DetermineStubCallMode());
870       break;
871     case kArchStoreWithWriteBarrier:
872     case kArchAtomicStoreWithWriteBarrier: {
873       RecordWriteMode mode =
874           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
875       Register object = i.InputRegister(0);
876       Register index = i.InputRegister(1);
877       Register value = i.InputRegister(2);
878       Register scratch0 = i.TempRegister(0);
879       Register scratch1 = i.TempRegister(1);
880       auto ool = zone()->New<OutOfLineRecordWrite>(this, object, index, value,
881                                                    scratch0, scratch1, mode,
882                                                    DetermineStubCallMode());
883       __ Addu(kScratchReg, object, index);
884       if (arch_opcode == kArchStoreWithWriteBarrier) {
885         __ sw(value, MemOperand(kScratchReg));
886       } else {
887         DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
888         __ sync();
889         __ sw(value, MemOperand(kScratchReg));
890         __ sync();
891       }
892       if (mode > RecordWriteMode::kValueIsPointer) {
893         __ JumpIfSmi(value, ool->exit());
894       }
895       __ CheckPageFlag(object, scratch0,
896                        MemoryChunk::kPointersFromHereAreInterestingMask, ne,
897                        ool->entry());
898       __ bind(ool->exit());
899       break;
900     }
901     case kArchStackSlot: {
902       FrameOffset offset =
903           frame_access_state()->GetFrameOffset(i.InputInt32(0));
904       Register base_reg = offset.from_stack_pointer() ? sp : fp;
905       __ Addu(i.OutputRegister(), base_reg, Operand(offset.offset()));
906       if (FLAG_debug_code > 0) {
907         // Verify that the output register is properly aligned.
908         __ And(kScratchReg, i.OutputRegister(),
909                Operand(kSystemPointerSize - 1));
910         __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
911                   Operand(zero_reg));
912       }
913       break;
914     }
915     case kIeee754Float64Acos:
916       ASSEMBLE_IEEE754_UNOP(acos);
917       break;
918     case kIeee754Float64Acosh:
919       ASSEMBLE_IEEE754_UNOP(acosh);
920       break;
921     case kIeee754Float64Asin:
922       ASSEMBLE_IEEE754_UNOP(asin);
923       break;
924     case kIeee754Float64Asinh:
925       ASSEMBLE_IEEE754_UNOP(asinh);
926       break;
927     case kIeee754Float64Atan:
928       ASSEMBLE_IEEE754_UNOP(atan);
929       break;
930     case kIeee754Float64Atanh:
931       ASSEMBLE_IEEE754_UNOP(atanh);
932       break;
933     case kIeee754Float64Atan2:
934       ASSEMBLE_IEEE754_BINOP(atan2);
935       break;
936     case kIeee754Float64Cos:
937       ASSEMBLE_IEEE754_UNOP(cos);
938       break;
939     case kIeee754Float64Cosh:
940       ASSEMBLE_IEEE754_UNOP(cosh);
941       break;
942     case kIeee754Float64Cbrt:
943       ASSEMBLE_IEEE754_UNOP(cbrt);
944       break;
945     case kIeee754Float64Exp:
946       ASSEMBLE_IEEE754_UNOP(exp);
947       break;
948     case kIeee754Float64Expm1:
949       ASSEMBLE_IEEE754_UNOP(expm1);
950       break;
951     case kIeee754Float64Log:
952       ASSEMBLE_IEEE754_UNOP(log);
953       break;
954     case kIeee754Float64Log1p:
955       ASSEMBLE_IEEE754_UNOP(log1p);
956       break;
957     case kIeee754Float64Log10:
958       ASSEMBLE_IEEE754_UNOP(log10);
959       break;
960     case kIeee754Float64Log2:
961       ASSEMBLE_IEEE754_UNOP(log2);
962       break;
963     case kIeee754Float64Pow:
964       ASSEMBLE_IEEE754_BINOP(pow);
965       break;
966     case kIeee754Float64Sin:
967       ASSEMBLE_IEEE754_UNOP(sin);
968       break;
969     case kIeee754Float64Sinh:
970       ASSEMBLE_IEEE754_UNOP(sinh);
971       break;
972     case kIeee754Float64Tan:
973       ASSEMBLE_IEEE754_UNOP(tan);
974       break;
975     case kIeee754Float64Tanh:
976       ASSEMBLE_IEEE754_UNOP(tanh);
977       break;
978     case kMipsAdd:
979       __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
980       break;
981     case kMipsAddOvf:
982       __ AddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
983                      kScratchReg);
984       break;
985     case kMipsSub:
986       __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
987       break;
988     case kMipsSubOvf:
989       __ SubOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
990                      kScratchReg);
991       break;
992     case kMipsMul:
993       __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
994       break;
995     case kMipsMulOvf:
996       __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
997                      kScratchReg);
998       break;
999     case kMipsMulHigh:
1000       __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1001       break;
1002     case kMipsMulHighU:
1003       __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1004       break;
1005     case kMipsDiv:
1006       __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1007       if (IsMipsArchVariant(kMips32r6)) {
1008         __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1009       } else {
1010         __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
1011       }
1012       break;
1013     case kMipsDivU:
1014       __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1015       if (IsMipsArchVariant(kMips32r6)) {
1016         __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1017       } else {
1018         __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
1019       }
1020       break;
1021     case kMipsMod:
1022       __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1023       break;
1024     case kMipsModU:
1025       __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1026       break;
1027     case kMipsAnd:
1028       __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1029       break;
1030     case kMipsOr:
1031       __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1032       break;
1033     case kMipsNor:
1034       if (instr->InputAt(1)->IsRegister()) {
1035         __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1036       } else {
1037         DCHECK_EQ(0, i.InputOperand(1).immediate());
1038         __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
1039       }
1040       break;
1041     case kMipsXor:
1042       __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1043       break;
1044     case kMipsClz:
1045       __ Clz(i.OutputRegister(), i.InputRegister(0));
1046       break;
1047     case kMipsCtz: {
1048       Register src = i.InputRegister(0);
1049       Register dst = i.OutputRegister();
1050       __ Ctz(dst, src);
1051     } break;
1052     case kMipsPopcnt: {
1053       Register src = i.InputRegister(0);
1054       Register dst = i.OutputRegister();
1055       __ Popcnt(dst, src);
1056     } break;
1057     case kMipsShl:
1058       if (instr->InputAt(1)->IsRegister()) {
1059         __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1060       } else {
1061         int32_t imm = i.InputOperand(1).immediate();
1062         __ sll(i.OutputRegister(), i.InputRegister(0), imm);
1063       }
1064       break;
1065     case kMipsShr:
1066       if (instr->InputAt(1)->IsRegister()) {
1067         __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1068       } else {
1069         int32_t imm = i.InputOperand(1).immediate();
1070         __ srl(i.OutputRegister(), i.InputRegister(0), imm);
1071       }
1072       break;
1073     case kMipsSar:
1074       if (instr->InputAt(1)->IsRegister()) {
1075         __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1076       } else {
1077         int32_t imm = i.InputOperand(1).immediate();
1078         __ sra(i.OutputRegister(), i.InputRegister(0), imm);
1079       }
1080       break;
1081     case kMipsShlPair: {
1082       Register second_output =
1083           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1084       if (instr->InputAt(2)->IsRegister()) {
1085         __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1086                    i.InputRegister(1), i.InputRegister(2), kScratchReg,
1087                    kScratchReg2);
1088       } else {
1089         uint32_t imm = i.InputOperand(2).immediate();
1090         __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1091                    i.InputRegister(1), imm, kScratchReg);
1092       }
1093     } break;
1094     case kMipsShrPair: {
1095       Register second_output =
1096           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1097       if (instr->InputAt(2)->IsRegister()) {
1098         __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1099                    i.InputRegister(1), i.InputRegister(2), kScratchReg,
1100                    kScratchReg2);
1101       } else {
1102         uint32_t imm = i.InputOperand(2).immediate();
1103         __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1104                    i.InputRegister(1), imm, kScratchReg);
1105       }
1106     } break;
1107     case kMipsSarPair: {
1108       Register second_output =
1109           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1110       if (instr->InputAt(2)->IsRegister()) {
1111         __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1112                    i.InputRegister(1), i.InputRegister(2), kScratchReg,
1113                    kScratchReg2);
1114       } else {
1115         uint32_t imm = i.InputOperand(2).immediate();
1116         __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1117                    i.InputRegister(1), imm, kScratchReg);
1118       }
1119     } break;
1120     case kMipsExt:
1121       __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
1122              i.InputInt8(2));
1123       break;
1124     case kMipsIns:
1125       if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
1126         __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
1127       } else {
1128         __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
1129                i.InputInt8(2));
1130       }
1131       break;
1132     case kMipsRor:
1133       __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1134       break;
1135     case kMipsTst:
1136       __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
1137       break;
1138     case kMipsCmp:
1139       // Pseudo-instruction used for cmp/branch. No opcode emitted here.
1140       break;
1141     case kMipsMov:
1142       // TODO(plind): Should we combine mov/li like this, or use separate instr?
1143       //    - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
1144       if (HasRegisterInput(instr, 0)) {
1145         __ mov(i.OutputRegister(), i.InputRegister(0));
1146       } else {
1147         __ li(i.OutputRegister(), i.InputOperand(0));
1148       }
1149       break;
1150     case kMipsLsa:
1151       DCHECK(instr->InputAt(2)->IsImmediate());
1152       __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1153              i.InputInt8(2));
1154       break;
1155     case kMipsCmpS: {
1156       FPURegister left = i.InputOrZeroSingleRegister(0);
1157       FPURegister right = i.InputOrZeroSingleRegister(1);
1158       bool predicate;
1159       FPUCondition cc =
1160           FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
1161 
1162       if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
1163           !__ IsDoubleZeroRegSet()) {
1164         __ Move(kDoubleRegZero, 0.0);
1165       }
1166 
1167       __ CompareF32(cc, left, right);
1168     } break;
1169     case kMipsAddS:
1170       // TODO(plind): add special case: combine mult & add.
1171       __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1172                i.InputDoubleRegister(1));
1173       break;
1174     case kMipsSubS:
1175       __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1176                i.InputDoubleRegister(1));
1177       break;
1178     case kMipsMulS:
1179       // TODO(plind): add special case: right op is -1.0, see arm port.
1180       __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1181                i.InputDoubleRegister(1));
1182       break;
1183     case kMipsDivS:
1184       __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1185                i.InputDoubleRegister(1));
1186       break;
1187     case kMipsAbsS:
1188       if (IsMipsArchVariant(kMips32r6)) {
1189         __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1190       } else {
1191         __ mfc1(kScratchReg, i.InputSingleRegister(0));
1192         __ Ins(kScratchReg, zero_reg, 31, 1);
1193         __ mtc1(kScratchReg, i.OutputSingleRegister());
1194       }
1195       break;
1196     case kMipsSqrtS: {
1197       __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1198       break;
1199     }
1200     case kMipsMaxS:
1201       __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1202                i.InputDoubleRegister(1));
1203       break;
1204     case kMipsMinS:
1205       __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1206                i.InputDoubleRegister(1));
1207       break;
1208     case kMipsCmpD: {
1209       FPURegister left = i.InputOrZeroDoubleRegister(0);
1210       FPURegister right = i.InputOrZeroDoubleRegister(1);
1211       bool predicate;
1212       FPUCondition cc =
1213           FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
1214       if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
1215           !__ IsDoubleZeroRegSet()) {
1216         __ Move(kDoubleRegZero, 0.0);
1217       }
1218       __ CompareF64(cc, left, right);
1219     } break;
1220     case kMipsAddPair:
1221       __ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
1222                  i.InputRegister(1), i.InputRegister(2), i.InputRegister(3),
1223                  kScratchReg, kScratchReg2);
1224       break;
1225     case kMipsSubPair:
1226       __ SubPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
1227                  i.InputRegister(1), i.InputRegister(2), i.InputRegister(3),
1228                  kScratchReg, kScratchReg2);
1229       break;
1230     case kMipsMulPair: {
1231       __ MulPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
1232                  i.InputRegister(1), i.InputRegister(2), i.InputRegister(3),
1233                  kScratchReg, kScratchReg2);
1234     } break;
1235     case kMipsAddD:
1236       // TODO(plind): add special case: combine mult & add.
1237       __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1238                i.InputDoubleRegister(1));
1239       break;
1240     case kMipsSubD:
1241       __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1242                i.InputDoubleRegister(1));
1243       break;
1244     case kMipsMaddS:
1245       __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
1246                 i.InputFloatRegister(1), i.InputFloatRegister(2),
1247                 kScratchDoubleReg);
1248       break;
1249     case kMipsMaddD:
1250       __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1251                 i.InputDoubleRegister(1), i.InputDoubleRegister(2),
1252                 kScratchDoubleReg);
1253       break;
1254     case kMipsMsubS:
1255       __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
1256                 i.InputFloatRegister(1), i.InputFloatRegister(2),
1257                 kScratchDoubleReg);
1258       break;
1259     case kMipsMsubD:
1260       __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1261                 i.InputDoubleRegister(1), i.InputDoubleRegister(2),
1262                 kScratchDoubleReg);
1263       break;
1264     case kMipsMulD:
1265       // TODO(plind): add special case: right op is -1.0, see arm port.
1266       __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1267                i.InputDoubleRegister(1));
1268       break;
1269     case kMipsDivD:
1270       __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1271                i.InputDoubleRegister(1));
1272       break;
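    // Float64 modulo has no MIPS instruction, so kMipsModD is lowered to a
    // call to the C implementation behind mod_two_doubles_operation():
    // arguments go out in the FP parameter registers and the result comes
    // back via MovFromFloatResult.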
1273     case kMipsModD: {
1274       // TODO(bmeurer): We should really get rid of this special instruction,
1275       // and generate a CallAddress instruction instead.
1276       FrameScope scope(tasm(), StackFrame::MANUAL);
1277       __ PrepareCallCFunction(0, 2, kScratchReg);
1278       __ MovToFloatParameters(i.InputDoubleRegister(0),
1279                               i.InputDoubleRegister(1));
1280       __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
1281       // Move the result into the double result register.
1282       __ MovFromFloatResult(i.OutputDoubleRegister());
1283       break;
1284     }
1285     case kMipsAbsD: {
1286       FPURegister src = i.InputDoubleRegister(0);
1287       FPURegister dst = i.OutputDoubleRegister();
1288       if (IsMipsArchVariant(kMips32r6)) {
1289         __ abs_d(dst, src);
1290       } else {
1291         __ Move(dst, src);
1292         __ mfhc1(kScratchReg, src);
1293         __ Ins(kScratchReg, zero_reg, 31, 1);
1294         __ mthc1(kScratchReg, dst);
1295       }
1296       break;
1297     }
1298     case kMipsNegS:
1299       __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1300       break;
1301     case kMipsNegD:
1302       __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1303       break;
1304     case kMipsSqrtD: {
1305       __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1306       break;
1307     }
1308     case kMipsMaxD:
1309       __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1310                i.InputDoubleRegister(1));
1311       break;
1312     case kMipsMinD:
1313       __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1314                i.InputDoubleRegister(1));
1315       break;
1316     case kMipsFloat64RoundDown: {
1317       __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1318       break;
1319     }
1320     case kMipsFloat32RoundDown: {
1321       __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1322       break;
1323     }
1324     case kMipsFloat64RoundTruncate: {
1325       __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1326       break;
1327     }
1328     case kMipsFloat32RoundTruncate: {
1329       __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1330       break;
1331     }
1332     case kMipsFloat64RoundUp: {
1333       __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1334       break;
1335     }
1336     case kMipsFloat32RoundUp: {
1337       __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1338       break;
1339     }
1340     case kMipsFloat64RoundTiesEven: {
1341       __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1342       break;
1343     }
1344     case kMipsFloat32RoundTiesEven: {
1345       __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1346       break;
1347     }
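    // The four Float{32,64}{Max,Min} cases below emit the fast path through
    // the TurboAssembler helper and branch to an OutOfLineFloat* stub for NaN
    // inputs; ool->entry() is the slow-path label and ool->exit() is where
    // both paths rejoin.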
1348     case kMipsFloat32Max: {
1349       FPURegister dst = i.OutputSingleRegister();
1350       FPURegister src1 = i.InputSingleRegister(0);
1351       FPURegister src2 = i.InputSingleRegister(1);
1352       auto ool = zone()->New<OutOfLineFloat32Max>(this, dst, src1, src2);
1353       __ Float32Max(dst, src1, src2, ool->entry());
1354       __ bind(ool->exit());
1355       break;
1356     }
1357     case kMipsFloat64Max: {
1358       DoubleRegister dst = i.OutputDoubleRegister();
1359       DoubleRegister src1 = i.InputDoubleRegister(0);
1360       DoubleRegister src2 = i.InputDoubleRegister(1);
1361       auto ool = zone()->New<OutOfLineFloat64Max>(this, dst, src1, src2);
1362       __ Float64Max(dst, src1, src2, ool->entry());
1363       __ bind(ool->exit());
1364       break;
1365     }
1366     case kMipsFloat32Min: {
1367       FPURegister dst = i.OutputSingleRegister();
1368       FPURegister src1 = i.InputSingleRegister(0);
1369       FPURegister src2 = i.InputSingleRegister(1);
1370       auto ool = zone()->New<OutOfLineFloat32Min>(this, dst, src1, src2);
1371       __ Float32Min(dst, src1, src2, ool->entry());
1372       __ bind(ool->exit());
1373       break;
1374     }
1375     case kMipsFloat64Min: {
1376       DoubleRegister dst = i.OutputDoubleRegister();
1377       DoubleRegister src1 = i.InputDoubleRegister(0);
1378       DoubleRegister src2 = i.InputDoubleRegister(1);
1379       auto ool = zone()->New<OutOfLineFloat64Min>(this, dst, src1, src2);
1380       __ Float64Min(dst, src1, src2, ool->entry());
1381       __ bind(ool->exit());
1382       break;
1383     }
1384     case kMipsCvtSD: {
1385       __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
1386       break;
1387     }
1388     case kMipsCvtDS: {
1389       __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
1390       break;
1391     }
1392     case kMipsCvtDW: {
1393       FPURegister scratch = kScratchDoubleReg;
1394       __ mtc1(i.InputRegister(0), scratch);
1395       __ cvt_d_w(i.OutputDoubleRegister(), scratch);
1396       break;
1397     }
1398     case kMipsCvtSW: {
1399       FPURegister scratch = kScratchDoubleReg;
1400       __ mtc1(i.InputRegister(0), scratch);
1401       __ cvt_s_w(i.OutputDoubleRegister(), scratch);
1402       break;
1403     }
1404     case kMipsCvtSUw: {
1405       FPURegister scratch = kScratchDoubleReg;
1406       __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
1407       __ cvt_s_d(i.OutputDoubleRegister(), i.OutputDoubleRegister());
1408       break;
1409     }
1410     case kMipsCvtDUw: {
1411       FPURegister scratch = kScratchDoubleReg;
1412       __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
1413       break;
1414     }
1415     case kMipsFloorWD: {
1416       FPURegister scratch = kScratchDoubleReg;
1417       __ Floor_w_d(scratch, i.InputDoubleRegister(0));
1418       __ mfc1(i.OutputRegister(), scratch);
1419       break;
1420     }
1421     case kMipsCeilWD: {
1422       FPURegister scratch = kScratchDoubleReg;
1423       __ Ceil_w_d(scratch, i.InputDoubleRegister(0));
1424       __ mfc1(i.OutputRegister(), scratch);
1425       break;
1426     }
1427     case kMipsRoundWD: {
1428       FPURegister scratch = kScratchDoubleReg;
1429       __ Round_w_d(scratch, i.InputDoubleRegister(0));
1430       __ mfc1(i.OutputRegister(), scratch);
1431       break;
1432     }
1433     case kMipsTruncWD: {
1434       FPURegister scratch = kScratchDoubleReg;
1435       // Other arches use round to zero here, so we follow.
1436       __ Trunc_w_d(scratch, i.InputDoubleRegister(0));
1437       __ mfc1(i.OutputRegister(), scratch);
1438       break;
1439     }
1440     case kMipsFloorWS: {
1441       FPURegister scratch = kScratchDoubleReg;
1442       __ floor_w_s(scratch, i.InputDoubleRegister(0));
1443       __ mfc1(i.OutputRegister(), scratch);
1444       break;
1445     }
1446     case kMipsCeilWS: {
1447       FPURegister scratch = kScratchDoubleReg;
1448       __ ceil_w_s(scratch, i.InputDoubleRegister(0));
1449       __ mfc1(i.OutputRegister(), scratch);
1450       break;
1451     }
1452     case kMipsRoundWS: {
1453       FPURegister scratch = kScratchDoubleReg;
1454       __ round_w_s(scratch, i.InputDoubleRegister(0));
1455       __ mfc1(i.OutputRegister(), scratch);
1456       break;
1457     }
1458     case kMipsTruncWS: {
1459       FPURegister scratch = kScratchDoubleReg;
1460       __ trunc_w_s(scratch, i.InputDoubleRegister(0));
1461       __ mfc1(i.OutputRegister(), scratch);
1462       // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
1463       // because INT32_MIN allows easier out-of-bounds detection.
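      // A rough reading of the branchless sequence below (not generated code):
      //   scratch  = out + 1;               // wraps to INT32_MIN only when
      //                                     // out == INT32_MAX
      //   scratch2 = (scratch < out);       // signed; set only on that wrap
      //   if (scratch2 != 0) out = scratch; // i.e. out becomes INT32_MIN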
1464       bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
1465       if (set_overflow_to_min_i32) {
1466         __ Addu(kScratchReg, i.OutputRegister(), 1);
1467         __ Slt(kScratchReg2, kScratchReg, i.OutputRegister());
1468         __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
1469       }
1470       break;
1471     }
1472     case kMipsTruncUwD: {
1473       FPURegister scratch = kScratchDoubleReg;
1474       __ Trunc_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
1475       break;
1476     }
1477     case kMipsTruncUwS: {
1478       FPURegister scratch = kScratchDoubleReg;
1479       __ Trunc_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
1480       // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
1481       // because 0 allows easier out-of-bounds detection.
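      // Same idea as in kMipsTruncWS above, unsigned flavour: out + 1 is zero
      // only when out == UINT32_MAX, and Movz then rewrites the output to 0.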
1482       bool set_overflow_to_min_u32 = MiscField::decode(instr->opcode());
1483       if (set_overflow_to_min_u32) {
1484         __ Addu(kScratchReg, i.OutputRegister(), 1);
1485         __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
1486       }
1487       break;
1488     }
1489     case kMipsFloat64ExtractLowWord32:
1490       __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
1491       break;
1492     case kMipsFloat64ExtractHighWord32:
1493       __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
1494       break;
1495     case kMipsFloat64InsertLowWord32:
1496       __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
1497       break;
1498     case kMipsFloat64InsertHighWord32:
1499       __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
1500       break;
1501     case kMipsFloat64SilenceNaN:
1502       __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1503       break;
1504 
1505     // ... more basic instructions ...
1506     case kMipsSeb:
1507       __ Seb(i.OutputRegister(), i.InputRegister(0));
1508       break;
1509     case kMipsSeh:
1510       __ Seh(i.OutputRegister(), i.InputRegister(0));
1511       break;
1512     case kMipsLbu:
1513       __ lbu(i.OutputRegister(), i.MemoryOperand());
1514       break;
1515     case kMipsLb:
1516       __ lb(i.OutputRegister(), i.MemoryOperand());
1517       break;
1518     case kMipsSb:
1519       __ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
1520       break;
1521     case kMipsLhu:
1522       __ lhu(i.OutputRegister(), i.MemoryOperand());
1523       break;
1524     case kMipsUlhu:
1525       __ Ulhu(i.OutputRegister(), i.MemoryOperand());
1526       break;
1527     case kMipsLh:
1528       __ lh(i.OutputRegister(), i.MemoryOperand());
1529       break;
1530     case kMipsUlh:
1531       __ Ulh(i.OutputRegister(), i.MemoryOperand());
1532       break;
1533     case kMipsSh:
1534       __ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
1535       break;
1536     case kMipsUsh:
1537       __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
1538       break;
1539     case kMipsLw:
1540       __ lw(i.OutputRegister(), i.MemoryOperand());
1541       break;
1542     case kMipsUlw:
1543       __ Ulw(i.OutputRegister(), i.MemoryOperand());
1544       break;
1545     case kMipsSw:
1546       __ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
1547       break;
1548     case kMipsUsw:
1549       __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
1550       break;
1551     case kMipsLwc1: {
1552       __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
1553       break;
1554     }
1555     case kMipsUlwc1: {
1556       __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
1557       break;
1558     }
1559     case kMipsSwc1: {
1560       size_t index = 0;
1561       MemOperand operand = i.MemoryOperand(&index);
1562       FPURegister ft = i.InputOrZeroSingleRegister(index);
1563       if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1564         __ Move(kDoubleRegZero, 0.0);
1565       }
1566       __ swc1(ft, operand);
1567       break;
1568     }
1569     case kMipsUswc1: {
1570       size_t index = 0;
1571       MemOperand operand = i.MemoryOperand(&index);
1572       FPURegister ft = i.InputOrZeroSingleRegister(index);
1573       if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1574         __ Move(kDoubleRegZero, 0.0);
1575       }
1576       __ Uswc1(ft, operand, kScratchReg);
1577       break;
1578     }
1579     case kMipsLdc1:
1580       __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
1581       break;
1582     case kMipsUldc1:
1583       __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
1584       break;
1585     case kMipsSdc1: {
1586       FPURegister ft = i.InputOrZeroDoubleRegister(2);
1587       if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1588         __ Move(kDoubleRegZero, 0.0);
1589       }
1590       __ Sdc1(ft, i.MemoryOperand());
1591       break;
1592     }
1593     case kMipsUsdc1: {
1594       FPURegister ft = i.InputOrZeroDoubleRegister(2);
1595       if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1596         __ Move(kDoubleRegZero, 0.0);
1597       }
1598       __ Usdc1(ft, i.MemoryOperand(), kScratchReg);
1599       break;
1600     }
1601     case kMipsSync: {
1602       __ sync();
1603       break;
1604     }
1605     case kMipsPush:
1606       if (instr->InputAt(0)->IsFPRegister()) {
1607         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
1608         switch (op->representation()) {
1609           case MachineRepresentation::kFloat32:
1610             __ swc1(i.InputFloatRegister(0), MemOperand(sp, -kFloatSize));
1611             __ Subu(sp, sp, Operand(kFloatSize));
1612             frame_access_state()->IncreaseSPDelta(kFloatSize /
1613                                                   kSystemPointerSize);
1614             break;
1615           case MachineRepresentation::kFloat64:
1616             __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
1617             __ Subu(sp, sp, Operand(kDoubleSize));
1618             frame_access_state()->IncreaseSPDelta(kDoubleSize /
1619                                                   kSystemPointerSize);
1620             break;
1621           default: {
1622             UNREACHABLE();
1623           }
1624         }
1625       } else {
1626         __ Push(i.InputRegister(0));
1627         frame_access_state()->IncreaseSPDelta(1);
1628       }
1629       break;
1630     case kMipsPeek: {
1631       int reverse_slot = i.InputInt32(0);
1632       int offset =
1633           FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
1634       if (instr->OutputAt(0)->IsFPRegister()) {
1635         LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
1636         if (op->representation() == MachineRepresentation::kFloat64) {
1637           __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
1638         } else if (op->representation() == MachineRepresentation::kFloat32) {
1639           __ lwc1(i.OutputSingleRegister(0), MemOperand(fp, offset));
1640         } else {
1641           DCHECK_EQ(op->representation(), MachineRepresentation::kSimd128);
          CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1642           __ ld_b(i.OutputSimd128Register(), MemOperand(fp, offset));
1643         }
1644       } else {
1645         __ lw(i.OutputRegister(0), MemOperand(fp, offset));
1646       }
1647       break;
1648     }
1649     case kMipsStackClaim: {
1650       __ Subu(sp, sp, Operand(i.InputInt32(0)));
1651       frame_access_state()->IncreaseSPDelta(i.InputInt32(0) /
1652                                             kSystemPointerSize);
1653       break;
1654     }
1655     case kMipsStoreToStackSlot: {
1656       if (instr->InputAt(0)->IsFPRegister()) {
1657         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
1658         if (op->representation() == MachineRepresentation::kFloat64) {
1659           __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
1660         } else if (op->representation() == MachineRepresentation::kFloat32) {
1661           __ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1)));
1662         } else {
1663           DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
1664           CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1665           __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1)));
1666         }
1667       } else {
1668         __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
1669       }
1670       break;
1671     }
1672     case kMipsByteSwap32: {
1673       __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
1674       break;
1675     }
1676     case kMipsS128Load8Splat: {
1677       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1678       __ lb(kScratchReg, i.MemoryOperand());
1679       __ fill_b(i.OutputSimd128Register(), kScratchReg);
1680       break;
1681     }
1682     case kMipsS128Load16Splat: {
1683       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1684       __ lh(kScratchReg, i.MemoryOperand());
1685       __ fill_h(i.OutputSimd128Register(), kScratchReg);
1686       break;
1687     }
1688     case kMipsS128Load32Splat: {
1689       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1690       __ Lw(kScratchReg, i.MemoryOperand());
1691       __ fill_w(i.OutputSimd128Register(), kScratchReg);
1692       break;
1693     }
1694     case kMipsS128Load64Splat: {
1695       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1696       Simd128Register dst = i.OutputSimd128Register();
1697       MemOperand memLow = i.MemoryOperand();
1698       MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4);
1699       __ Lw(kScratchReg, memLow);
1700       __ fill_w(dst, kScratchReg);
1701       __ Lw(kScratchReg, memHigh);
1702       __ fill_w(kSimd128ScratchReg, kScratchReg);
1703       __ ilvr_w(dst, kSimd128ScratchReg, dst);
1704       break;
1705     }
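    // The S128Load*x* cases below share one pattern: MIPS32 has no 64-bit GPR
    // load, so the 64-bit lane is read as two 32-bit words, each word is
    // splatted with fill_w, and ilvr_w interleaves them back into one 64-bit
    // value. The widening variants then either interleave a sign mask built
    // with clti_s_* (signed extension) or a zeroed vector (unsigned extension)
    // into the upper halves of the destination lanes.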
1706     case kMipsS128Load8x8S: {
1707       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1708       Simd128Register dst = i.OutputSimd128Register();
1709       MemOperand memLow = i.MemoryOperand();
1710       MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4);
1711       __ Lw(kScratchReg, memLow);
1712       __ fill_w(dst, kScratchReg);
1713       __ Lw(kScratchReg, memHigh);
1714       __ fill_w(kSimd128ScratchReg, kScratchReg);
1715       __ ilvr_w(dst, kSimd128ScratchReg, dst);
1716       __ clti_s_b(kSimd128ScratchReg, dst, 0);
1717       __ ilvr_b(dst, kSimd128ScratchReg, dst);
1718       break;
1719     }
1720     case kMipsS128Load8x8U: {
1721       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1722       Simd128Register dst = i.OutputSimd128Register();
1723       MemOperand memLow = i.MemoryOperand();
1724       MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4);
1725       __ Lw(kScratchReg, memLow);
1726       __ fill_w(dst, kScratchReg);
1727       __ Lw(kScratchReg, memHigh);
1728       __ fill_w(kSimd128ScratchReg, kScratchReg);
1729       __ ilvr_w(dst, kSimd128ScratchReg, dst);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
1730       __ ilvr_b(dst, kSimd128RegZero, dst);
1731       break;
1732     }
1733     case kMipsS128Load16x4S: {
1734       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1735       Simd128Register dst = i.OutputSimd128Register();
1736       MemOperand memLow = i.MemoryOperand();
1737       MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4);
1738       __ Lw(kScratchReg, memLow);
1739       __ fill_w(dst, kScratchReg);
1740       __ Lw(kScratchReg, memHigh);
1741       __ fill_w(kSimd128ScratchReg, kScratchReg);
1742       __ ilvr_w(dst, kSimd128ScratchReg, dst);
1743       __ clti_s_h(kSimd128ScratchReg, dst, 0);
1744       __ ilvr_h(dst, kSimd128ScratchReg, dst);
1745       break;
1746     }
1747     case kMipsS128Load16x4U: {
1748       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1749       Simd128Register dst = i.OutputSimd128Register();
1750       MemOperand memLow = i.MemoryOperand();
1751       MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4);
1752       __ Lw(kScratchReg, memLow);
1753       __ fill_w(dst, kScratchReg);
1754       __ Lw(kScratchReg, memHigh);
1755       __ fill_w(kSimd128ScratchReg, kScratchReg);
1756       __ ilvr_w(dst, kSimd128ScratchReg, dst);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
1757       __ ilvr_h(dst, kSimd128RegZero, dst);
1758       break;
1759     }
1760     case kMipsS128Load32x2S: {
1761       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1762       Simd128Register dst = i.OutputSimd128Register();
1763       MemOperand memLow = i.MemoryOperand();
1764       MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4);
1765       __ Lw(kScratchReg, memLow);
1766       __ fill_w(dst, kScratchReg);
1767       __ Lw(kScratchReg, memHigh);
1768       __ fill_w(kSimd128ScratchReg, kScratchReg);
1769       __ ilvr_w(dst, kSimd128ScratchReg, dst);
1770       __ clti_s_w(kSimd128ScratchReg, dst, 0);
1771       __ ilvr_w(dst, kSimd128ScratchReg, dst);
1772       break;
1773     }
1774     case kMipsS128Load32x2U: {
1775       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1776       Simd128Register dst = i.OutputSimd128Register();
1777       MemOperand memLow = i.MemoryOperand();
1778       MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4);
1779       __ Lw(kScratchReg, memLow);
1780       __ fill_w(dst, kScratchReg);
1781       __ Lw(kScratchReg, memHigh);
1782       __ fill_w(kSimd128ScratchReg, kScratchReg);
1783       __ ilvr_w(dst, kSimd128ScratchReg, dst);
      __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
1784       __ ilvr_w(dst, kSimd128RegZero, dst);
1785       break;
1786     }
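    // The generic kAtomic* cases delegate to the ASSEMBLE_ATOMIC_* macros
    // defined earlier in this file; on MIPS these are expected to expand to
    // sync barriers around the plain load/store for the load and store cases,
    // and to an ll/sc retry loop for the read-modify-write operations.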
1787     case kAtomicLoadInt8:
1788       ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
1789       break;
1790     case kAtomicLoadUint8:
1791       ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
1792       break;
1793     case kAtomicLoadInt16:
1794       ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
1795       break;
1796     case kAtomicLoadUint16:
1797       ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
1798       break;
1799     case kAtomicLoadWord32:
1800       ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
1801       break;
1802     case kAtomicStoreWord8:
1803       ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
1804       break;
1805     case kAtomicStoreWord16:
1806       ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
1807       break;
1808     case kAtomicStoreWord32:
1809       ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
1810       break;
1811     case kAtomicExchangeInt8:
1812       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
1813       break;
1814     case kAtomicExchangeUint8:
1815       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
1816       break;
1817     case kAtomicExchangeInt16:
1818       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
1819       break;
1820     case kAtomicExchangeUint16:
1821       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
1822       break;
1823     case kAtomicExchangeWord32:
1824       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
1825       break;
1826     case kAtomicCompareExchangeInt8:
1827       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
1828       break;
1829     case kAtomicCompareExchangeUint8:
1830       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
1831       break;
1832     case kAtomicCompareExchangeInt16:
1833       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
1834       break;
1835     case kAtomicCompareExchangeUint16:
1836       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
1837       break;
1838     case kAtomicCompareExchangeWord32:
1839       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
1840       break;
1841 #define ATOMIC_BINOP_CASE(op, inst)             \
1842   case kAtomic##op##Int8:                       \
1843     ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst);   \
1844     break;                                      \
1845   case kAtomic##op##Uint8:                      \
1846     ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst);  \
1847     break;                                      \
1848   case kAtomic##op##Int16:                      \
1849     ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst);  \
1850     break;                                      \
1851   case kAtomic##op##Uint16:                     \
1852     ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \
1853     break;                                      \
1854   case kAtomic##op##Word32:                     \
1855     ASSEMBLE_ATOMIC_BINOP(inst);                \
1856     break;
1857       ATOMIC_BINOP_CASE(Add, Addu)
1858       ATOMIC_BINOP_CASE(Sub, Subu)
1859       ATOMIC_BINOP_CASE(And, And)
1860       ATOMIC_BINOP_CASE(Or, Or)
1861       ATOMIC_BINOP_CASE(Xor, Xor)
1862 #undef ATOMIC_BINOP_CASE
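    // The kMipsWord32AtomicPair* cases implement 64-bit atomics on a 32-bit
    // core. On r6, ll/sc access the low word at offset 0 and llx/scx the
    // adjacent high word at offset 4 as one linked 64-bit access, and the
    // store-conditional result is retried in a loop until it succeeds. Pre-r6
    // cores fall back to the C++ helpers behind the atomic_pair_* external
    // references.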
1863     case kMipsWord32AtomicPairLoad: {
1864       if (IsMipsArchVariant(kMips32r6)) {
1865         if (instr->OutputCount() > 0) {
1866           Register second_output = instr->OutputCount() == 2
1867                                        ? i.OutputRegister(1)
1868                                        : i.TempRegister(1);
1869           __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
1870           __ llx(second_output, MemOperand(a0, 4));
1871           __ ll(i.OutputRegister(0), MemOperand(a0, 0));
1872           __ sync();
1873         }
1874       } else {
1875         FrameScope scope(tasm(), StackFrame::MANUAL);
1876         __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
1877         __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);
1878         __ PrepareCallCFunction(1, 0, kScratchReg);
1879         __ CallCFunction(ExternalReference::atomic_pair_load_function(), 1, 0);
1880         __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);
1881       }
1882       break;
1883     }
1884     case kMipsWord32AtomicPairStore: {
1885       if (IsMipsArchVariant(kMips32r6)) {
1886         Label store;
1887         __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
1888         __ sync();
1889         __ bind(&store);
1890         __ llx(i.TempRegister(2), MemOperand(a0, 4));
1891         __ ll(i.TempRegister(1), MemOperand(a0, 0));
1892         __ Move(i.TempRegister(1), i.InputRegister(2));
1893         __ scx(i.InputRegister(3), MemOperand(a0, 4));
1894         __ sc(i.TempRegister(1), MemOperand(a0, 0));
1895         __ BranchShort(&store, eq, i.TempRegister(1), Operand(zero_reg));
1896         __ sync();
1897       } else {
1898         FrameScope scope(tasm(), StackFrame::MANUAL);
1899         __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
1900         __ PushCallerSaved(SaveFPRegsMode::kIgnore);
1901         __ PrepareCallCFunction(3, 0, kScratchReg);
1902         __ CallCFunction(ExternalReference::atomic_pair_store_function(), 3, 0);
1903         __ PopCallerSaved(SaveFPRegsMode::kIgnore);
1904       }
1905       break;
1906     }
1907 #define ATOMIC64_BINOP_ARITH_CASE(op, instr, external) \
1908   case kMipsWord32AtomicPair##op:                      \
1909     ASSEMBLE_ATOMIC64_ARITH_BINOP(instr, external);    \
1910     break;
1911       ATOMIC64_BINOP_ARITH_CASE(Add, AddPair, atomic_pair_add_function)
1912       ATOMIC64_BINOP_ARITH_CASE(Sub, SubPair, atomic_pair_sub_function)
1913 #undef ATOMIC64_BINOP_ARITH_CASE
1914 #define ATOMIC64_BINOP_LOGIC_CASE(op, instr, external) \
1915   case kMipsWord32AtomicPair##op:                      \
1916     ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr, external);    \
1917     break;
1918       ATOMIC64_BINOP_LOGIC_CASE(And, AndPair, atomic_pair_and_function)
1919       ATOMIC64_BINOP_LOGIC_CASE(Or, OrPair, atomic_pair_or_function)
1920       ATOMIC64_BINOP_LOGIC_CASE(Xor, XorPair, atomic_pair_xor_function)
1921 #undef ATOMIC64_BINOP_LOGIC_CASE
1922     case kMipsWord32AtomicPairExchange:
1923       if (IsMipsArchVariant(kMips32r6)) {
1924         Label binop;
1925         Register oldval_low =
1926             instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1);
1927         Register oldval_high =
1928             instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2);
1929         __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
1930         __ sync();
1931         __ bind(&binop);
1932         __ llx(oldval_high, MemOperand(i.TempRegister(0), 4));
1933         __ ll(oldval_low, MemOperand(i.TempRegister(0), 0));
1934         __ Move(i.TempRegister(1), i.InputRegister(2));
1935         __ scx(i.InputRegister(3), MemOperand(i.TempRegister(0), 4));
1936         __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));
1937         __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));
1938         __ sync();
1939       } else {
1940         FrameScope scope(tasm(), StackFrame::MANUAL);
1941         __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);
1942         __ PrepareCallCFunction(3, 0, kScratchReg);
1943         __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
1944         __ CallCFunction(ExternalReference::atomic_pair_exchange_function(), 3,
1945                          0);
1946         __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);
1947       }
1948       break;
1949     case kMipsWord32AtomicPairCompareExchange: {
1950       if (IsMipsArchVariant(kMips32r6)) {
1951         Label compareExchange, exit;
1952         Register oldval_low =
1953             instr->OutputCount() >= 1 ? i.OutputRegister(0) : kScratchReg;
1954         Register oldval_high =
1955             instr->OutputCount() >= 2 ? i.OutputRegister(1) : kScratchReg2;
1956         __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
1957         __ sync();
1958         __ bind(&compareExchange);
1959         __ llx(oldval_high, MemOperand(i.TempRegister(0), 4));
1960         __ ll(oldval_low, MemOperand(i.TempRegister(0), 0));
1961         __ BranchShort(&exit, ne, i.InputRegister(2), Operand(oldval_low));
1962         __ BranchShort(&exit, ne, i.InputRegister(3), Operand(oldval_high));
1963         __ mov(kScratchReg, i.InputRegister(4));
1964         __ scx(i.InputRegister(5), MemOperand(i.TempRegister(0), 4));
1965         __ sc(kScratchReg, MemOperand(i.TempRegister(0), 0));
1966         __ BranchShort(&compareExchange, eq, kScratchReg, Operand(zero_reg));
1967         __ bind(&exit);
1968         __ sync();
1969       } else {
1970         FrameScope scope(tasm(), StackFrame::MANUAL);
1971         __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);
1972         __ PrepareCallCFunction(5, 0, kScratchReg);
1973         __ addu(a0, i.InputRegister(0), i.InputRegister(1));
1974         __ sw(i.InputRegister(5), MemOperand(sp, 16));
1975         __ CallCFunction(
1976             ExternalReference::atomic_pair_compare_exchange_function(), 5, 0);
1977         __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);
1978       }
1979       break;
1980     }
1981     case kMipsS128Zero: {
1982       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1983       __ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
1984                i.OutputSimd128Register());
1985       break;
1986     }
1987     case kMipsI32x4Splat: {
1988       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1989       __ fill_w(i.OutputSimd128Register(), i.InputRegister(0));
1990       break;
1991     }
1992     case kMipsI32x4ExtractLane: {
1993       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1994       __ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0),
1995                   i.InputInt8(1));
1996       break;
1997     }
1998     case kMipsI32x4ReplaceLane: {
1999       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2000       Simd128Register src = i.InputSimd128Register(0);
2001       Simd128Register dst = i.OutputSimd128Register();
2002       if (src != dst) {
2003         __ move_v(dst, src);
2004       }
2005       __ insert_w(dst, i.InputInt8(1), i.InputRegister(2));
2006       break;
2007     }
2008     case kMipsI32x4Add: {
2009       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2010       __ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2011                 i.InputSimd128Register(1));
2012       break;
2013     }
2014     case kMipsI32x4Sub: {
2015       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2016       __ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2017                 i.InputSimd128Register(1));
2018       break;
2019     }
2020     case kMipsI32x4ExtAddPairwiseI16x8S: {
2021       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2022       __ hadd_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2023                   i.InputSimd128Register(0));
2024       break;
2025     }
2026     case kMipsI32x4ExtAddPairwiseI16x8U: {
2027       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2028       __ hadd_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2029                   i.InputSimd128Register(0));
2030       break;
2031     }
2032     case kMipsF64x2Abs: {
2033       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2034       __ bclri_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63);
2035       break;
2036     }
2037     case kMipsF64x2Neg: {
2038       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2039       __ bnegi_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63);
2040       break;
2041     }
2042     case kMipsF64x2Sqrt: {
2043       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2044       __ fsqrt_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
2045       break;
2046     }
2047     case kMipsF64x2Add: {
2048       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2049       ASSEMBLE_F64X2_ARITHMETIC_BINOP(fadd_d);
2050       break;
2051     }
2052     case kMipsF64x2Sub: {
2053       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2054       ASSEMBLE_F64X2_ARITHMETIC_BINOP(fsub_d);
2055       break;
2056     }
2057     case kMipsF64x2Mul: {
2058       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2059       ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmul_d);
2060       break;
2061     }
2062     case kMipsF64x2Div: {
2063       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2064       ASSEMBLE_F64X2_ARITHMETIC_BINOP(fdiv_d);
2065       break;
2066     }
2067     case kMipsF64x2Min: {
2068       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2069       ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmin_d);
2070       break;
2071     }
2072     case kMipsF64x2Max: {
2073       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2074       ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmax_d);
2075       break;
2076     }
2077     case kMipsF64x2Eq: {
2078       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2079       __ fceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2080                 i.InputSimd128Register(1));
2081       break;
2082     }
2083     case kMipsF64x2Ne: {
2084       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2085       __ fcne_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2086                 i.InputSimd128Register(1));
2087       break;
2088     }
2089     case kMipsF64x2Lt: {
2090       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2091       __ fclt_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2092                 i.InputSimd128Register(1));
2093       break;
2094     }
2095     case kMipsF64x2Le: {
2096       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2097       __ fcle_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2098                 i.InputSimd128Register(1));
2099       break;
2100     }
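    // F64x2 lane accesses below go through 32-bit pieces: on MIPS32 a double
    // is handled as a low/high word pair (FmoveLow/FmoveHigh), and each half
    // is placed into the matching 32-bit MSA lane (2 * lane for the low word,
    // 2 * lane + 1 for the high word).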
2101     case kMipsF64x2Splat: {
2102       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2103       Simd128Register dst = i.OutputSimd128Register();
2104       __ FmoveLow(kScratchReg, i.InputDoubleRegister(0));
2105       __ insert_w(dst, 0, kScratchReg);
2106       __ insert_w(dst, 2, kScratchReg);
2107       __ FmoveHigh(kScratchReg, i.InputDoubleRegister(0));
2108       __ insert_w(dst, 1, kScratchReg);
2109       __ insert_w(dst, 3, kScratchReg);
2110       break;
2111     }
2112     case kMipsF64x2ExtractLane: {
2113       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2114       __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1) * 2);
2115       __ FmoveLow(i.OutputDoubleRegister(), kScratchReg);
2116       __ copy_u_w(kScratchReg, i.InputSimd128Register(0),
2117                   i.InputInt8(1) * 2 + 1);
2118       __ FmoveHigh(i.OutputDoubleRegister(), kScratchReg);
2119       break;
2120     }
2121     case kMipsF64x2ReplaceLane: {
2122       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2123       Simd128Register src = i.InputSimd128Register(0);
2124       Simd128Register dst = i.OutputSimd128Register();
2125       if (src != dst) {
2126         __ move_v(dst, src);
2127       }
2128       __ FmoveLow(kScratchReg, i.InputDoubleRegister(2));
2129       __ insert_w(dst, i.InputInt8(1) * 2, kScratchReg);
2130       __ FmoveHigh(kScratchReg, i.InputDoubleRegister(2));
2131       __ insert_w(dst, i.InputInt8(1) * 2 + 1, kScratchReg);
2132       break;
2133     }
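    // Pmin/Pmax implement the Wasm pseudo-min/max (b < a ? b : a) with a
    // compare-and-select: fclt writes an all-ones mask into each lane where
    // the comparison holds, and bsel_v then takes bits from its last operand
    // where the mask is set and from the other operand elsewhere.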
2134     case kMipsF64x2Pmin: {
2135       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2136       Simd128Register dst = i.OutputSimd128Register();
2137       Simd128Register lhs = i.InputSimd128Register(0);
2138       Simd128Register rhs = i.InputSimd128Register(1);
2139       // dst = rhs < lhs ? rhs : lhs
2140       __ fclt_d(dst, rhs, lhs);
2141       __ bsel_v(dst, lhs, rhs);
2142       break;
2143     }
2144     case kMipsF64x2Pmax: {
2145       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2146       Simd128Register dst = i.OutputSimd128Register();
2147       Simd128Register lhs = i.InputSimd128Register(0);
2148       Simd128Register rhs = i.InputSimd128Register(1);
2149       // dst = lhs < rhs ? rhs : lhs
2150       __ fclt_d(dst, lhs, rhs);
2151       __ bsel_v(dst, lhs, rhs);
2152       break;
2153     }
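    // The rounding cases below temporarily switch the MSA rounding mode:
    // cfcmsa saves the current MSACSR, ctcmsa installs the wanted mode
    // (kRoundToPlusInf, kRoundToMinusInf, kRoundToZero, or 0 for
    // round-to-nearest), frint_d/frint_w rounds under that mode, and the
    // saved MSACSR is written back.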
2154     case kMipsF64x2Ceil: {
2155       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2156       __ cfcmsa(kScratchReg, MSACSR);
2157       __ li(kScratchReg2, kRoundToPlusInf);
2158       __ ctcmsa(MSACSR, kScratchReg2);
2159       __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
2160       __ ctcmsa(MSACSR, kScratchReg);
2161       break;
2162     }
2163     case kMipsF64x2Floor: {
2164       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2165       __ cfcmsa(kScratchReg, MSACSR);
2166       __ li(kScratchReg2, kRoundToMinusInf);
2167       __ ctcmsa(MSACSR, kScratchReg2);
2168       __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
2169       __ ctcmsa(MSACSR, kScratchReg);
2170       break;
2171     }
2172     case kMipsF64x2Trunc: {
2173       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2174       __ cfcmsa(kScratchReg, MSACSR);
2175       __ li(kScratchReg2, kRoundToZero);
2176       __ ctcmsa(MSACSR, kScratchReg2);
2177       __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
2178       __ ctcmsa(MSACSR, kScratchReg);
2179       break;
2180     }
2181     case kMipsF64x2NearestInt: {
2182       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2183       __ cfcmsa(kScratchReg, MSACSR);
2184       // kRoundToNearest == 0
2185       __ ctcmsa(MSACSR, zero_reg);
2186       __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
2187       __ ctcmsa(MSACSR, kScratchReg);
2188       break;
2189     }
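    // ConvertLowI32x4{S,U}: the two low 32-bit lanes are widened to 64 bits
    // before the int-to-double conversion. The unsigned case zero-extends by
    // interleaving with a zeroed vector; the signed case additionally shifts
    // each 64-bit lane left and then arithmetically right by 32 to
    // sign-extend.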
2190     case kMipsF64x2ConvertLowI32x4S: {
2191       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2192       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2193       __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
2194       __ slli_d(kSimd128RegZero, kSimd128RegZero, 32);
2195       __ srai_d(kSimd128RegZero, kSimd128RegZero, 32);
2196       __ ffint_s_d(i.OutputSimd128Register(), kSimd128RegZero);
2197       break;
2198     }
2199     case kMipsF64x2ConvertLowI32x4U: {
2200       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2201       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2202       __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
2203       __ ffint_u_d(i.OutputSimd128Register(), kSimd128RegZero);
2204       break;
2205     }
2206     case kMipsF64x2PromoteLowF32x4: {
2207       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2208       __ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
2209       break;
2210     }
2211     case kMipsI64x2Add: {
2212       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2213       __ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2214                 i.InputSimd128Register(1));
2215       break;
2216     }
2217     case kMipsI64x2Sub: {
2218       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2219       __ subv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2220                 i.InputSimd128Register(1));
2221       break;
2222     }
2223     case kMipsI64x2Mul: {
2224       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2225       __ mulv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2226                 i.InputSimd128Register(1));
2227       break;
2228     }
2229     case kMipsI64x2Neg: {
2230       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2231       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2232       __ subv_d(i.OutputSimd128Register(), kSimd128RegZero,
2233                 i.InputSimd128Register(0));
2234       break;
2235     }
2236     case kMipsI64x2Shl: {
2237       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2238       __ slli_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2239                 i.InputInt6(1));
2240       break;
2241     }
2242     case kMipsI64x2ShrS: {
2243       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2244       __ srai_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2245                 i.InputInt6(1));
2246       break;
2247     }
2248     case kMipsI64x2ShrU: {
2249       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2250       __ srli_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2251                 i.InputInt6(1));
2252       break;
2253     }
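    // I64x2BitMask collects the two 64-bit lane sign bits into a 2-bit mask:
    // srli_d by 63 leaves each sign bit in bit 0 of its lane, shf_w/slli_d
    // move the second lane's bit up to bit 1 of the low word, or_v merges the
    // two, and copy_u_b extracts the resulting byte into the GPR result.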
2254     case kMipsI64x2BitMask: {
2255       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2256       Register dst = i.OutputRegister();
2257       Simd128Register src = i.InputSimd128Register(0);
2258       Simd128Register scratch0 = kSimd128RegZero;
2259       Simd128Register scratch1 = kSimd128ScratchReg;
2260       __ srli_d(scratch0, src, 63);
2261       __ shf_w(scratch1, scratch0, 0x02);
2262       __ slli_d(scratch1, scratch1, 1);
2263       __ or_v(scratch0, scratch0, scratch1);
2264       __ copy_u_b(dst, scratch0, 0);
2265       break;
2266     }
2267     case kMipsI64x2Eq: {
2268       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2269       __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2270                i.InputSimd128Register(1));
2271       break;
2272     }
2273     case kMipsI64x2Ne: {
2274       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2275       __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2276                i.InputSimd128Register(1));
2277       __ nor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
2278                i.OutputSimd128Register());
2279       break;
2280     }
2281     case kMipsI64x2GtS: {
2282       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2283       __ clt_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
2284                  i.InputSimd128Register(0));
2285       break;
2286     }
2287     case kMipsI64x2GeS: {
2288       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2289       __ cle_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
2290                  i.InputSimd128Register(0));
2291       break;
2292     }
2293     case kMipsI64x2Abs: {
2294       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2295       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2296       __ adds_a_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2297                   kSimd128RegZero);
2298       break;
2299     }
2300     case kMipsI64x2SConvertI32x4Low: {
2301       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2302       Simd128Register dst = i.OutputSimd128Register();
2303       Simd128Register src = i.InputSimd128Register(0);
2304       __ ilvr_w(kSimd128ScratchReg, src, src);
2305       __ slli_d(dst, kSimd128ScratchReg, 32);
2306       __ srai_d(dst, dst, 32);
2307       break;
2308     }
2309     case kMipsI64x2SConvertI32x4High: {
2310       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2311       Simd128Register dst = i.OutputSimd128Register();
2312       Simd128Register src = i.InputSimd128Register(0);
2313       __ ilvl_w(kSimd128ScratchReg, src, src);
2314       __ slli_d(dst, kSimd128ScratchReg, 32);
2315       __ srai_d(dst, dst, 32);
2316       break;
2317     }
2318     case kMipsI64x2UConvertI32x4Low: {
2319       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2320       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2321       __ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero,
2322                 i.InputSimd128Register(0));
2323       break;
2324     }
2325     case kMipsI64x2UConvertI32x4High: {
2326       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2327       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2328       __ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero,
2329                 i.InputSimd128Register(0));
2330       break;
2331     }
2332     case kMipsI64x2ExtMulLowI32x4S:
2333       ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_w, dotp_s_d);
2334       break;
2335     case kMipsI64x2ExtMulHighI32x4S:
2336       ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_w, dotp_s_d);
2337       break;
2338     case kMipsI64x2ExtMulLowI32x4U:
2339       ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_w, dotp_u_d);
2340       break;
2341     case kMipsI64x2ExtMulHighI32x4U:
2342       ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_w, dotp_u_d);
2343       break;
2344     case kMipsI32x4ExtMulLowI16x8S:
2345       ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_h, dotp_s_w);
2346       break;
2347     case kMipsI32x4ExtMulHighI16x8S:
2348       ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_h, dotp_s_w);
2349       break;
2350     case kMipsI32x4ExtMulLowI16x8U:
2351       ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_h, dotp_u_w);
2352       break;
2353     case kMipsI32x4ExtMulHighI16x8U:
2354       ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_h, dotp_u_w);
2355       break;
2356     case kMipsI16x8ExtMulLowI8x16S:
2357       ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_b, dotp_s_h);
2358       break;
2359     case kMipsI16x8ExtMulHighI8x16S:
2360       ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_b, dotp_s_h);
2361       break;
2362     case kMipsI16x8ExtMulLowI8x16U:
2363       ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_b, dotp_u_h);
2364       break;
2365     case kMipsI16x8ExtMulHighI8x16U:
2366       ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_b, dotp_u_h);
2367       break;
2368     case kMipsF32x4Splat: {
2369       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2370       __ FmoveLow(kScratchReg, i.InputSingleRegister(0));
2371       __ fill_w(i.OutputSimd128Register(), kScratchReg);
2372       break;
2373     }
2374     case kMipsF32x4ExtractLane: {
2375       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2376       __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
2377       __ FmoveLow(i.OutputSingleRegister(), kScratchReg);
2378       break;
2379     }
2380     case kMipsF32x4ReplaceLane: {
2381       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2382       Simd128Register src = i.InputSimd128Register(0);
2383       Simd128Register dst = i.OutputSimd128Register();
2384       if (src != dst) {
2385         __ move_v(dst, src);
2386       }
2387       __ FmoveLow(kScratchReg, i.InputSingleRegister(2));
2388       __ insert_w(dst, i.InputInt8(1), kScratchReg);
2389       break;
2390     }
2391     case kMipsF32x4SConvertI32x4: {
2392       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2393       __ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2394       break;
2395     }
2396     case kMipsF32x4UConvertI32x4: {
2397       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2398       __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2399       break;
2400     }
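    // DemoteF64x2Zero: fexdo_w packs its two operands into one vector of
    // singles, so pairing the zeroed register with the source yields the two
    // demoted doubles in the low half and zeros in the high half.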
2401     case kMipsF32x4DemoteF64x2Zero: {
2402       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2403       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2404       __ fexdo_w(i.OutputSimd128Register(), kSimd128RegZero,
2405                  i.InputSimd128Register(0));
2406       break;
2407     }
2408     case kMipsI32x4Mul: {
2409       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2410       __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2411                 i.InputSimd128Register(1));
2412       break;
2413     }
2414     case kMipsI32x4MaxS: {
2415       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2416       __ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2417                  i.InputSimd128Register(1));
2418       break;
2419     }
2420     case kMipsI32x4MinS: {
2421       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2422       __ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2423                  i.InputSimd128Register(1));
2424       break;
2425     }
2426     case kMipsI32x4Eq: {
2427       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2428       __ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2429                i.InputSimd128Register(1));
2430       break;
2431     }
2432     case kMipsI32x4Ne: {
2433       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2434       Simd128Register dst = i.OutputSimd128Register();
2435       __ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
2436       __ nor_v(dst, dst, dst);
2437       break;
2438     }
2439     case kMipsI32x4Shl: {
2440       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2441       __ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2442                 i.InputInt5(1));
2443       break;
2444     }
2445     case kMipsI32x4ShrS: {
2446       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2447       __ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2448                 i.InputInt5(1));
2449       break;
2450     }
2451     case kMipsI32x4ShrU: {
2452       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2453       __ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2454                 i.InputInt5(1));
2455       break;
2456     }
2457     case kMipsI32x4MaxU: {
2458       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2459       __ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2460                  i.InputSimd128Register(1));
2461       break;
2462     }
2463     case kMipsI32x4MinU: {
2464       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2465       __ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2466                  i.InputSimd128Register(1));
2467       break;
2468     }
2469     case kMipsS128Select: {
2470       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2471       DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0));
2472       __ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
2473                 i.InputSimd128Register(1));
2474       break;
2475     }
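    // S128AndNot: MSA has no and-not, so nor_v(x, x) first builds ~input1 and
    // and_v then computes input0 & ~input1.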
2476     case kMipsS128AndNot: {
2477       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2478       Simd128Register dst = i.OutputSimd128Register();
2479       __ nor_v(dst, i.InputSimd128Register(1), i.InputSimd128Register(1));
2480       __ and_v(dst, dst, i.InputSimd128Register(0));
2481       break;
2482     }
2483     case kMipsF32x4Abs: {
2484       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2485       __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
2486       break;
2487     }
2488     case kMipsF32x4Neg: {
2489       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2490       __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
2491       break;
2492     }
2493     case kMipsF32x4Sqrt: {
2494       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2495       __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2496       break;
2497     }
2498     case kMipsF32x4RecipApprox: {
2499       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2500       __ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2501       break;
2502     }
2503     case kMipsF32x4RecipSqrtApprox: {
2504       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2505       __ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2506       break;
2507     }
2508     case kMipsF32x4Add: {
2509       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2510       __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2511                 i.InputSimd128Register(1));
2512       break;
2513     }
2514     case kMipsF32x4Sub: {
2515       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2516       __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2517                 i.InputSimd128Register(1));
2518       break;
2519     }
2520     case kMipsF32x4Mul: {
2521       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2522       __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2523                 i.InputSimd128Register(1));
2524       break;
2525     }
2526     case kMipsF32x4Div: {
2527       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2528       __ fdiv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2529                 i.InputSimd128Register(1));
2530       break;
2531     }
2532     case kMipsF32x4Max: {
2533       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2534       __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2535                 i.InputSimd128Register(1));
2536       break;
2537     }
2538     case kMipsF32x4Min: {
2539       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2540       __ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2541                 i.InputSimd128Register(1));
2542       break;
2543     }
2544     case kMipsF32x4Eq: {
2545       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2546       __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2547                 i.InputSimd128Register(1));
2548       break;
2549     }
2550     case kMipsF32x4Ne: {
2551       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2552       __ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2553                 i.InputSimd128Register(1));
2554       break;
2555     }
2556     case kMipsF32x4Lt: {
2557       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2558       __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2559                 i.InputSimd128Register(1));
2560       break;
2561     }
2562     case kMipsF32x4Le: {
2563       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2564       __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2565                 i.InputSimd128Register(1));
2566       break;
2567     }
2568     case kMipsF32x4Pmin: {
2569       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2570       Simd128Register dst = i.OutputSimd128Register();
2571       Simd128Register lhs = i.InputSimd128Register(0);
2572       Simd128Register rhs = i.InputSimd128Register(1);
2573       // dst = rhs < lhs ? rhs : lhs
2574       __ fclt_w(dst, rhs, lhs);
2575       __ bsel_v(dst, lhs, rhs);
2576       break;
2577     }
2578     case kMipsF32x4Pmax: {
2579       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2580       Simd128Register dst = i.OutputSimd128Register();
2581       Simd128Register lhs = i.InputSimd128Register(0);
2582       Simd128Register rhs = i.InputSimd128Register(1);
2583       // dst = lhs < rhs ? rhs : lhs
2584       __ fclt_w(dst, lhs, rhs);
2585       __ bsel_v(dst, lhs, rhs);
2586       break;
2587     }
2588     case kMipsF32x4Ceil: {
2589       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2590       __ cfcmsa(kScratchReg, MSACSR);
2591       __ li(kScratchReg2, kRoundToPlusInf);
2592       __ ctcmsa(MSACSR, kScratchReg2);
2593       __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2594       __ ctcmsa(MSACSR, kScratchReg);
2595       break;
2596     }
2597     case kMipsF32x4Floor: {
2598       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2599       __ cfcmsa(kScratchReg, MSACSR);
2600       __ li(kScratchReg2, kRoundToMinusInf);
2601       __ ctcmsa(MSACSR, kScratchReg2);
2602       __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2603       __ ctcmsa(MSACSR, kScratchReg);
2604       break;
2605     }
2606     case kMipsF32x4Trunc: {
2607       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2608       __ cfcmsa(kScratchReg, MSACSR);
2609       __ li(kScratchReg2, kRoundToZero);
2610       __ ctcmsa(MSACSR, kScratchReg2);
2611       __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2612       __ ctcmsa(MSACSR, kScratchReg);
2613       break;
2614     }
2615     case kMipsF32x4NearestInt: {
2616       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2617       __ cfcmsa(kScratchReg, MSACSR);
2618       // kRoundToNearest == 0
2619       __ ctcmsa(MSACSR, zero_reg);
2620       __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2621       __ ctcmsa(MSACSR, kScratchReg);
2622       break;
2623     }
2624     case kMipsI32x4SConvertF32x4: {
2625       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2626       __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2627       break;
2628     }
2629     case kMipsI32x4UConvertF32x4: {
2630       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2631       __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2632       break;
2633     }
2634     case kMipsI32x4Neg: {
2635       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
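           // Negation is computed as 0 - x; kSimd128RegZero is re-cleared
           // first because other cases in this switch reuse it as a scratch
           // register.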
2636       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2637       __ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
2638                 i.InputSimd128Register(0));
2639       break;
2640     }
2641     case kMipsI32x4GtS: {
2642       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
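           // MSA provides clt/cle ("less than"/"less or equal") but no
           // greater-than compares, so a > b is emitted as b < a with the
           // operands swapped; the GeS/GtU/GeU cases below do the same.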
2643       __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2644                  i.InputSimd128Register(0));
2645       break;
2646     }
2647     case kMipsI32x4GeS: {
2648       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2649       __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2650                  i.InputSimd128Register(0));
2651       break;
2652     }
2653     case kMipsI32x4GtU: {
2654       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2655       __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2656                  i.InputSimd128Register(0));
2657       break;
2658     }
2659     case kMipsI32x4GeU: {
2660       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2661       __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2662                  i.InputSimd128Register(0));
2663       break;
2664     }
2665     case kMipsI32x4Abs: {
2666       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2667       __ asub_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2668                   kSimd128RegZero);
2669       break;
2670     }
2671     case kMipsI32x4BitMask: {
2672       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2673       Register dst = i.OutputRegister();
2674       Simd128Register src = i.InputSimd128Register(0);
2675       Simd128Register scratch0 = kSimd128RegZero;
2676       Simd128Register scratch1 = kSimd128ScratchReg;
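           // Gather each 32-bit lane's sign bit into bits 0..3 of byte 0 and
           // copy that byte out. Roughly, as a scalar sketch for illustration:
           //   dst = (w0 >> 31) | ((w1 >> 31) << 1) |
           //         ((w2 >> 31) << 2) | ((w3 >> 31) << 3);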
2677       __ srli_w(scratch0, src, 31);
2678       __ srli_d(scratch1, scratch0, 31);
2679       __ or_v(scratch0, scratch0, scratch1);
2680       __ shf_w(scratch1, scratch0, 0x0E);
2681       __ slli_d(scratch1, scratch1, 2);
2682       __ or_v(scratch0, scratch0, scratch1);
2683       __ copy_u_b(dst, scratch0, 0);
2684       break;
2685     }
2686     case kMipsI32x4DotI16x8S: {
2687       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2688       __ dotp_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2689                   i.InputSimd128Register(1));
2690       break;
2691     }
2692     case kMipsI32x4TruncSatF64x2SZero: {
2693       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2694       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
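           // Truncate each f64 lane to an integer, saturate it to the signed
           // 32-bit range, then pack the low words of both lanes into the
           // lower half of dst; pairing with kSimd128RegZero zeroes the upper
           // half. The unsigned variant below mirrors this.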
2695       __ ftrunc_s_d(kSimd128ScratchReg, i.InputSimd128Register(0));
2696       __ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
2697       __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
2698                  kSimd128ScratchReg);
2699       break;
2700     }
2701     case kMipsI32x4TruncSatF64x2UZero: {
2702       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2703       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2704       __ ftrunc_u_d(kSimd128ScratchReg, i.InputSimd128Register(0));
2705       __ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
2706       __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
2707                  kSimd128ScratchReg);
2708       break;
2709     }
2710     case kMipsI16x8Splat: {
2711       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2712       __ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
2713       break;
2714     }
2715     case kMipsI16x8ExtractLaneU: {
2716       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2717       __ copy_u_h(i.OutputRegister(), i.InputSimd128Register(0),
2718                   i.InputInt8(1));
2719       break;
2720     }
2721     case kMipsI16x8ExtractLaneS: {
2722       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2723       __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
2724                   i.InputInt8(1));
2725       break;
2726     }
2727     case kMipsI16x8ReplaceLane: {
2728       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2729       Simd128Register src = i.InputSimd128Register(0);
2730       Simd128Register dst = i.OutputSimd128Register();
2731       if (src != dst) {
2732         __ move_v(dst, src);
2733       }
2734       __ insert_h(dst, i.InputInt8(1), i.InputRegister(2));
2735       break;
2736     }
2737     case kMipsI16x8Neg: {
2738       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2739       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2740       __ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
2741                 i.InputSimd128Register(0));
2742       break;
2743     }
2744     case kMipsI16x8Shl: {
2745       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2746       __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2747                 i.InputInt4(1));
2748       break;
2749     }
2750     case kMipsI16x8ShrS: {
2751       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2752       __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2753                 i.InputInt4(1));
2754       break;
2755     }
2756     case kMipsI16x8ShrU: {
2757       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2758       __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2759                 i.InputInt4(1));
2760       break;
2761     }
2762     case kMipsI16x8Add: {
2763       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2764       __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2765                 i.InputSimd128Register(1));
2766       break;
2767     }
2768     case kMipsI16x8AddSatS: {
2769       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2770       __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2771                   i.InputSimd128Register(1));
2772       break;
2773     }
2774     case kMipsI16x8Sub: {
2775       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2776       __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2777                 i.InputSimd128Register(1));
2778       break;
2779     }
2780     case kMipsI16x8SubSatS: {
2781       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2782       __ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2783                   i.InputSimd128Register(1));
2784       break;
2785     }
2786     case kMipsI16x8Mul: {
2787       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2788       __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2789                 i.InputSimd128Register(1));
2790       break;
2791     }
2792     case kMipsI16x8MaxS: {
2793       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2794       __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2795                  i.InputSimd128Register(1));
2796       break;
2797     }
2798     case kMipsI16x8MinS: {
2799       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2800       __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2801                  i.InputSimd128Register(1));
2802       break;
2803     }
2804     case kMipsI16x8Eq: {
2805       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2806       __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2807                i.InputSimd128Register(1));
2808       break;
2809     }
2810     case kMipsI16x8Ne: {
2811       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2812       Simd128Register dst = i.OutputSimd128Register();
2813       __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
2814       __ nor_v(dst, dst, dst);
2815       break;
2816     }
2817     case kMipsI16x8GtS: {
2818       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2819       __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2820                  i.InputSimd128Register(0));
2821       break;
2822     }
2823     case kMipsI16x8GeS: {
2824       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2825       __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2826                  i.InputSimd128Register(0));
2827       break;
2828     }
2829     case kMipsI16x8AddSatU: {
2830       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2831       __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2832                   i.InputSimd128Register(1));
2833       break;
2834     }
2835     case kMipsI16x8SubSatU: {
2836       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2837       __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2838                   i.InputSimd128Register(1));
2839       break;
2840     }
2841     case kMipsI16x8MaxU: {
2842       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2843       __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2844                  i.InputSimd128Register(1));
2845       break;
2846     }
2847     case kMipsI16x8MinU: {
2848       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2849       __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2850                  i.InputSimd128Register(1));
2851       break;
2852     }
2853     case kMipsI16x8GtU: {
2854       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2855       __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2856                  i.InputSimd128Register(0));
2857       break;
2858     }
2859     case kMipsI16x8GeU: {
2860       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2861       __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2862                  i.InputSimd128Register(0));
2863       break;
2864     }
2865     case kMipsI16x8RoundingAverageU: {
2866       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2867       __ aver_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2868                   i.InputSimd128Register(0));
2869       break;
2870     }
2871     case kMipsI16x8Abs: {
2872       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2873       __ asub_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2874                   kSimd128RegZero);
2875       break;
2876     }
2877     case kMipsI16x8BitMask: {
2878       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2879       Register dst = i.OutputRegister();
2880       Simd128Register src = i.InputSimd128Register(0);
2881       Simd128Register scratch0 = kSimd128RegZero;
2882       Simd128Register scratch1 = kSimd128ScratchReg;
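           // Same approach as kMipsI32x4BitMask: shift every halfword's sign
           // bit down to bit 0, then OR-fold progressively wider neighbours
           // until all eight mask bits sit in the lowest byte.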
2883       __ srli_h(scratch0, src, 15);
2884       __ srli_w(scratch1, scratch0, 15);
2885       __ or_v(scratch0, scratch0, scratch1);
2886       __ srli_d(scratch1, scratch0, 30);
2887       __ or_v(scratch0, scratch0, scratch1);
2888       __ shf_w(scratch1, scratch0, 0x0E);
2889       __ slli_d(scratch1, scratch1, 4);
2890       __ or_v(scratch0, scratch0, scratch1);
2891       __ copy_u_b(dst, scratch0, 0);
2892       break;
2893     }
2894     case kMipsI16x8Q15MulRSatS: {
2895       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2896       __ mulr_q_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2897                   i.InputSimd128Register(1));
2898       break;
2899     }
2900     case kMipsI16x8ExtAddPairwiseI8x16S: {
2901       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
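           // hadd_s_h with the same register for both sources sums adjacent
           // (sign-extended) byte pairs into 16-bit lanes; the unsigned
           // variant below uses hadd_u_h.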
2902       __ hadd_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2903                   i.InputSimd128Register(0));
2904       break;
2905     }
2906     case kMipsI16x8ExtAddPairwiseI8x16U: {
2907       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2908       __ hadd_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2909                   i.InputSimd128Register(0));
2910       break;
2911     }
2912     case kMipsI8x16Splat: {
2913       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2914       __ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
2915       break;
2916     }
2917     case kMipsI8x16ExtractLaneU: {
2918       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2919       __ copy_u_b(i.OutputRegister(), i.InputSimd128Register(0),
2920                   i.InputInt8(1));
2921       break;
2922     }
2923     case kMipsI8x16ExtractLaneS: {
2924       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2925       __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
2926                   i.InputInt8(1));
2927       break;
2928     }
2929     case kMipsI8x16ReplaceLane: {
2930       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2931       Simd128Register src = i.InputSimd128Register(0);
2932       Simd128Register dst = i.OutputSimd128Register();
2933       if (src != dst) {
2934         __ move_v(dst, src);
2935       }
2936       __ insert_b(dst, i.InputInt8(1), i.InputRegister(2));
2937       break;
2938     }
2939     case kMipsI8x16Neg: {
2940       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2941       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2942       __ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
2943                 i.InputSimd128Register(0));
2944       break;
2945     }
2946     case kMipsI8x16Shl: {
2947       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2948       __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2949                 i.InputInt3(1));
2950       break;
2951     }
2952     case kMipsI8x16ShrS: {
2953       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2954       __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2955                 i.InputInt3(1));
2956       break;
2957     }
2958     case kMipsI8x16Add: {
2959       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2960       __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2961                 i.InputSimd128Register(1));
2962       break;
2963     }
2964     case kMipsI8x16AddSatS: {
2965       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2966       __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2967                   i.InputSimd128Register(1));
2968       break;
2969     }
2970     case kMipsI8x16Sub: {
2971       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2972       __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2973                 i.InputSimd128Register(1));
2974       break;
2975     }
2976     case kMipsI8x16SubSatS: {
2977       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2978       __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2979                   i.InputSimd128Register(1));
2980       break;
2981     }
2982     case kMipsI8x16MaxS: {
2983       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2984       __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2985                  i.InputSimd128Register(1));
2986       break;
2987     }
2988     case kMipsI8x16MinS: {
2989       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2990       __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2991                  i.InputSimd128Register(1));
2992       break;
2993     }
2994     case kMipsI8x16Eq: {
2995       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2996       __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2997                i.InputSimd128Register(1));
2998       break;
2999     }
3000     case kMipsI8x16Ne: {
3001       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3002       Simd128Register dst = i.OutputSimd128Register();
3003       __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
3004       __ nor_v(dst, dst, dst);
3005       break;
3006     }
3007     case kMipsI8x16GtS: {
3008       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3009       __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3010                  i.InputSimd128Register(0));
3011       break;
3012     }
3013     case kMipsI8x16GeS: {
3014       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3015       __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3016                  i.InputSimd128Register(0));
3017       break;
3018     }
3019     case kMipsI8x16ShrU: {
3020       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3021       __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3022                 i.InputInt3(1));
3023       break;
3024     }
3025     case kMipsI8x16AddSatU: {
3026       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3027       __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3028                   i.InputSimd128Register(1));
3029       break;
3030     }
3031     case kMipsI8x16SubSatU: {
3032       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3033       __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3034                   i.InputSimd128Register(1));
3035       break;
3036     }
3037     case kMipsI8x16MaxU: {
3038       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3039       __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3040                  i.InputSimd128Register(1));
3041       break;
3042     }
3043     case kMipsI8x16MinU: {
3044       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3045       __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3046                  i.InputSimd128Register(1));
3047       break;
3048     }
3049     case kMipsI8x16GtU: {
3050       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3051       __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3052                  i.InputSimd128Register(0));
3053       break;
3054     }
3055     case kMipsI8x16GeU: {
3056       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3057       __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3058                  i.InputSimd128Register(0));
3059       break;
3060     }
3061     case kMipsI8x16RoundingAverageU: {
3062       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3063       __ aver_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3064                   i.InputSimd128Register(0));
3065       break;
3066     }
3067     case kMipsI8x16Abs: {
3068       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3069       __ asub_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3070                   kSimd128RegZero);
3071       break;
3072     }
3073     case kMipsI8x16Popcnt: {
3074       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3075       __ pcnt_b(i.OutputSimd128Register(), i.InputSimd128Register(0));
3076       break;
3077     }
3078     case kMipsI8x16BitMask: {
3079       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3080       Register dst = i.OutputRegister();
3081       Simd128Register src = i.InputSimd128Register(0);
3082       Simd128Register scratch0 = kSimd128RegZero;
3083       Simd128Register scratch1 = kSimd128ScratchReg;
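           // As in the other BitMask cases, the 16 byte-lane sign bits are
           // folded together step by step; the final ilvev_b/copy_u_h places
           // them in the low halfword of the scalar result.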
3084       __ srli_b(scratch0, src, 7);
3085       __ srli_h(scratch1, scratch0, 7);
3086       __ or_v(scratch0, scratch0, scratch1);
3087       __ srli_w(scratch1, scratch0, 14);
3088       __ or_v(scratch0, scratch0, scratch1);
3089       __ srli_d(scratch1, scratch0, 28);
3090       __ or_v(scratch0, scratch0, scratch1);
3091       __ shf_w(scratch1, scratch0, 0x0E);
3092       __ ilvev_b(scratch0, scratch1, scratch0);
3093       __ copy_u_h(dst, scratch0, 0);
3094       break;
3095     }
3096     case kMipsS128And: {
3097       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3098       __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
3099                i.InputSimd128Register(1));
3100       break;
3101     }
3102     case kMipsS128Or: {
3103       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3104       __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
3105               i.InputSimd128Register(1));
3106       break;
3107     }
3108     case kMipsS128Xor: {
3109       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3110       __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
3111                i.InputSimd128Register(1));
3112       break;
3113     }
3114     case kMipsS128Not: {
3115       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3116       __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
3117                i.InputSimd128Register(0));
3118       break;
3119     }
3120     case kMipsV128AnyTrue: {
3121       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3122       Register dst = i.OutputRegister();
3123       Label all_false;
3124 
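           // The li in the branch delay slot executes whether or not the
           // branch is taken, so dst is preset to 0 and only overwritten when
           // at least one bit is set. The AllTrue cases below use the
           // mirrored pattern.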
3125       __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
3126                    i.InputSimd128Register(0), USE_DELAY_SLOT);
3127       __ li(dst, 0);  // branch delay slot
3128       __ li(dst, -1);
3129       __ bind(&all_false);
3130       break;
3131     }
3132     case kMipsI64x2AllTrue: {
3133       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3134       Register dst = i.OutputRegister();
3135       Label all_true;
3136       __ BranchMSA(&all_true, MSA_BRANCH_D, all_not_zero,
3137                    i.InputSimd128Register(0), USE_DELAY_SLOT);
3138       __ li(dst, -1);  // branch delay slot
3139       __ li(dst, 0);
3140       __ bind(&all_true);
3141       break;
3142     }
3143     case kMipsI32x4AllTrue: {
3144       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3145       Register dst = i.OutputRegister();
3146       Label all_true;
3147       __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
3148                    i.InputSimd128Register(0), USE_DELAY_SLOT);
3149       __ li(dst, -1);  // branch delay slot
3150       __ li(dst, 0);
3151       __ bind(&all_true);
3152       break;
3153     }
3154     case kMipsI16x8AllTrue: {
3155       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3156       Register dst = i.OutputRegister();
3157       Label all_true;
3158       __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
3159                    i.InputSimd128Register(0), USE_DELAY_SLOT);
3160       __ li(dst, -1);  // branch delay slot
3161       __ li(dst, 0);
3162       __ bind(&all_true);
3163       break;
3164     }
3165     case kMipsI8x16AllTrue: {
3166       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3167       Register dst = i.OutputRegister();
3168       Label all_true;
3169       __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
3170                    i.InputSimd128Register(0), USE_DELAY_SLOT);
3171       __ li(dst, -1);  // branch delay slot
3172       __ li(dst, 0);
3173       __ bind(&all_true);
3174       break;
3175     }
3176     case kMipsMsaLd: {
3177       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3178       __ ld_b(i.OutputSimd128Register(), i.MemoryOperand());
3179       break;
3180     }
3181     case kMipsMsaSt: {
3182       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3183       __ st_b(i.InputSimd128Register(2), i.MemoryOperand());
3184       break;
3185     }
3186     case kMipsS32x4InterleaveRight: {
3187       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3188       Simd128Register dst = i.OutputSimd128Register(),
3189                       src0 = i.InputSimd128Register(0),
3190                       src1 = i.InputSimd128Register(1);
3191       // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3192       // dst = [5, 1, 4, 0]
3193       __ ilvr_w(dst, src1, src0);
3194       break;
3195     }
3196     case kMipsS32x4InterleaveLeft: {
3197       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3198       Simd128Register dst = i.OutputSimd128Register(),
3199                       src0 = i.InputSimd128Register(0),
3200                       src1 = i.InputSimd128Register(1);
3201       // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3202       // dst = [7, 3, 6, 2]
3203       __ ilvl_w(dst, src1, src0);
3204       break;
3205     }
3206     case kMipsS32x4PackEven: {
3207       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3208       Simd128Register dst = i.OutputSimd128Register(),
3209                       src0 = i.InputSimd128Register(0),
3210                       src1 = i.InputSimd128Register(1);
3211       // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3212       // dst = [6, 4, 2, 0]
3213       __ pckev_w(dst, src1, src0);
3214       break;
3215     }
3216     case kMipsS32x4PackOdd: {
3217       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3218       Simd128Register dst = i.OutputSimd128Register(),
3219                       src0 = i.InputSimd128Register(0),
3220                       src1 = i.InputSimd128Register(1);
3221       // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3222       // dst = [7, 5, 3, 1]
3223       __ pckod_w(dst, src1, src0);
3224       break;
3225     }
3226     case kMipsS32x4InterleaveEven: {
3227       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3228       Simd128Register dst = i.OutputSimd128Register(),
3229                       src0 = i.InputSimd128Register(0),
3230                       src1 = i.InputSimd128Register(1);
3231       // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3232       // dst = [6, 2, 4, 0]
3233       __ ilvev_w(dst, src1, src0);
3234       break;
3235     }
3236     case kMipsS32x4InterleaveOdd: {
3237       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3238       Simd128Register dst = i.OutputSimd128Register(),
3239                       src0 = i.InputSimd128Register(0),
3240                       src1 = i.InputSimd128Register(1);
3241       // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3242       // dst = [7, 3, 5, 1]
3243       __ ilvod_w(dst, src1, src0);
3244       break;
3245     }
3246     case kMipsS32x4Shuffle: {
3247       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3248       Simd128Register dst = i.OutputSimd128Register(),
3249                       src0 = i.InputSimd128Register(0),
3250                       src1 = i.InputSimd128Register(1);
3251 
3252       int32_t shuffle = i.InputInt32(2);
3253 
3254       if (src0 == src1) {
3255         // Unary S32x4 shuffles are handled with the shf.w instruction.
3256         unsigned lane = shuffle & 0xFF;
3257         if (FLAG_debug_code) {
3258           // For a unary shuffle, all four lane indices must come from the
3259           // same source, so they must all lie in the same range:
3260           // either [0, 3] or [4, 7].
3261           if (lane >= 4) {
3262             int32_t shuffle_helper = shuffle;
3263             for (int i = 0; i < 4; ++i) {
3264               lane = shuffle_helper & 0xFF;
3265               CHECK_GE(lane, 4);
3266               shuffle_helper >>= 8;
3267             }
3268           }
3269         }
3270         uint32_t i8 = 0;
3271         for (int i = 0; i < 4; i++) {
3272           lane = shuffle & 0xFF;
3273           if (lane >= 4) {
3274             lane -= 4;
3275           }
3276           DCHECK_GT(4, lane);
3277           i8 |= lane << (2 * i);
3278           shuffle >>= 8;
3279         }
3280         __ shf_w(dst, src0, i8);
3281       } else {
3282         // For binary shuffles, use the vshf.w instruction.
3283         if (dst == src0) {
3284           __ move_v(kSimd128ScratchReg, src0);
3285           src0 = kSimd128ScratchReg;
3286         } else if (dst == src1) {
3287           __ move_v(kSimd128ScratchReg, src1);
3288           src1 = kSimd128ScratchReg;
3289         }
3290 
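             // The four 8-bit lane selectors are inserted into word 0 of dst
             // and zero-extended to one 32-bit control element per lane
             // (interleaving with a zero vector) before vshf.w performs the
             // actual shuffle.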
3291         __ li(kScratchReg, i.InputInt32(2));
3292         __ insert_w(dst, 0, kScratchReg);
3293         __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3294         __ ilvr_b(dst, kSimd128RegZero, dst);
3295         __ ilvr_h(dst, kSimd128RegZero, dst);
3296         __ vshf_w(dst, src1, src0);
3297       }
3298       break;
3299     }
3300     case kMipsS16x8InterleaveRight: {
3301       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3302       Simd128Register dst = i.OutputSimd128Register(),
3303                       src0 = i.InputSimd128Register(0),
3304                       src1 = i.InputSimd128Register(1);
3305       // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3306       // dst = [11, 3, 10, 2, 9, 1, 8, 0]
3307       __ ilvr_h(dst, src1, src0);
3308       break;
3309     }
3310     case kMipsS16x8InterleaveLeft: {
3311       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3312       Simd128Register dst = i.OutputSimd128Register(),
3313                       src0 = i.InputSimd128Register(0),
3314                       src1 = i.InputSimd128Register(1);
3315       // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3316       // dst = [15, 7, 14, 6, 13, 5, 12, 4]
3317       __ ilvl_h(dst, src1, src0);
3318       break;
3319     }
3320     case kMipsS16x8PackEven: {
3321       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3322       Simd128Register dst = i.OutputSimd128Register(),
3323                       src0 = i.InputSimd128Register(0),
3324                       src1 = i.InputSimd128Register(1);
3325       // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3326       // dst = [14, 12, 10, 8, 6, 4, 2, 0]
3327       __ pckev_h(dst, src1, src0);
3328       break;
3329     }
3330     case kMipsS16x8PackOdd: {
3331       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3332       Simd128Register dst = i.OutputSimd128Register(),
3333                       src0 = i.InputSimd128Register(0),
3334                       src1 = i.InputSimd128Register(1);
3335       // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3336       // dst = [15, 13, 11, 9, 7, 5, 3, 1]
3337       __ pckod_h(dst, src1, src0);
3338       break;
3339     }
3340     case kMipsS16x8InterleaveEven: {
3341       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3342       Simd128Register dst = i.OutputSimd128Register(),
3343                       src0 = i.InputSimd128Register(0),
3344                       src1 = i.InputSimd128Register(1);
3345       // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3346       // dst = [14, 6, 12, 4, 10, 2, 8, 0]
3347       __ ilvev_h(dst, src1, src0);
3348       break;
3349     }
3350     case kMipsS16x8InterleaveOdd: {
3351       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3352       Simd128Register dst = i.OutputSimd128Register(),
3353                       src0 = i.InputSimd128Register(0),
3354                       src1 = i.InputSimd128Register(1);
3355       // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3356       // dst = [15, 7, ... 11, 3, 9, 1]
3357       __ ilvod_h(dst, src1, src0);
3358       break;
3359     }
3360     case kMipsS16x4Reverse: {
3361       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3362       // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3]
3363       // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
3364       __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
3365       break;
3366     }
3367     case kMipsS16x2Reverse: {
3368       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3369       // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 3, 2, 0, 1]
3370       // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
3371       __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
3372       break;
3373     }
3374     case kMipsS8x16InterleaveRight: {
3375       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3376       Simd128Register dst = i.OutputSimd128Register(),
3377                       src0 = i.InputSimd128Register(0),
3378                       src1 = i.InputSimd128Register(1);
3379       // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3380       // dst = [23, 7, ... 17, 1, 16, 0]
3381       __ ilvr_b(dst, src1, src0);
3382       break;
3383     }
3384     case kMipsS8x16InterleaveLeft: {
3385       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3386       Simd128Register dst = i.OutputSimd128Register(),
3387                       src0 = i.InputSimd128Register(0),
3388                       src1 = i.InputSimd128Register(1);
3389       // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3390       // dst = [31, 15, ... 25, 9, 24, 8]
3391       __ ilvl_b(dst, src1, src0);
3392       break;
3393     }
3394     case kMipsS8x16PackEven: {
3395       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3396       Simd128Register dst = i.OutputSimd128Register(),
3397                       src0 = i.InputSimd128Register(0),
3398                       src1 = i.InputSimd128Register(1);
3399       // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3400       // dst = [30, 28, ... 6, 4, 2, 0]
3401       __ pckev_b(dst, src1, src0);
3402       break;
3403     }
3404     case kMipsS8x16PackOdd: {
3405       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3406       Simd128Register dst = i.OutputSimd128Register(),
3407                       src0 = i.InputSimd128Register(0),
3408                       src1 = i.InputSimd128Register(1);
3409       // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3410       // dst = [31, 29, ... 7, 5, 3, 1]
3411       __ pckod_b(dst, src1, src0);
3412       break;
3413     }
3414     case kMipsS8x16InterleaveEven: {
3415       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3416       Simd128Register dst = i.OutputSimd128Register(),
3417                       src0 = i.InputSimd128Register(0),
3418                       src1 = i.InputSimd128Register(1);
3419       // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3420       // dst = [30, 14, ... 18, 2, 16, 0]
3421       __ ilvev_b(dst, src1, src0);
3422       break;
3423     }
3424     case kMipsS8x16InterleaveOdd: {
3425       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3426       Simd128Register dst = i.OutputSimd128Register(),
3427                       src0 = i.InputSimd128Register(0),
3428                       src1 = i.InputSimd128Register(1);
3429       // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3430       // dst = [31, 15, ... 19, 3, 17, 1]
3431       __ ilvod_b(dst, src1, src0);
3432       break;
3433     }
3434     case kMipsS8x16Concat: {
3435       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3436       Simd128Register dst = i.OutputSimd128Register();
3437       DCHECK(dst == i.InputSimd128Register(0));
3438       __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
3439       break;
3440     }
3441     case kMipsI8x16Shuffle: {
3442       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3443       Simd128Register dst = i.OutputSimd128Register(),
3444                       src0 = i.InputSimd128Register(0),
3445                       src1 = i.InputSimd128Register(1);
3446 
3447       if (dst == src0) {
3448         __ move_v(kSimd128ScratchReg, src0);
3449         src0 = kSimd128ScratchReg;
3450       } else if (dst == src1) {
3451         __ move_v(kSimd128ScratchReg, src1);
3452         src1 = kSimd128ScratchReg;
3453       }
3454 
3455       __ li(kScratchReg, i.InputInt32(2));
3456       __ insert_w(dst, 0, kScratchReg);
3457       __ li(kScratchReg, i.InputInt32(3));
3458       __ insert_w(dst, 1, kScratchReg);
3459       __ li(kScratchReg, i.InputInt32(4));
3460       __ insert_w(dst, 2, kScratchReg);
3461       __ li(kScratchReg, i.InputInt32(5));
3462       __ insert_w(dst, 3, kScratchReg);
3463       __ vshf_b(dst, src1, src0);
3464       break;
3465     }
3466     case kMipsI8x16Swizzle: {
3467       Simd128Register dst = i.OutputSimd128Register(),
3468                       tbl = i.InputSimd128Register(0),
3469                       ctl = i.InputSimd128Register(1);
3470       DCHECK(dst != ctl && dst != tbl);
3471       Simd128Register zeroReg = i.TempSimd128Register(0);
3472       __ fill_w(zeroReg, zero_reg);
3473       __ move_v(dst, ctl);
3474       __ vshf_b(dst, tbl, zeroReg);
3475       break;
3476     }
3477     case kMipsS8x8Reverse: {
3478       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3479       // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
3480       // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7]
3481       // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1
3482       // C: [7, 6, 5, 4] => A': [4, 5, 6, 7]: shf.b imm: 00011011 = 0x1B
3483       __ shf_w(kSimd128ScratchReg, i.InputSimd128Register(0), 0xB1);
3484       __ shf_b(i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B);
3485       break;
3486     }
3487     case kMipsS8x4Reverse: {
3488       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3489       // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3]
3490       // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
3491       __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
3492       break;
3493     }
3494     case kMipsS8x2Reverse: {
3495       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3496       // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 2, 3, 0, 1]
3497       // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
3498       __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
3499       break;
3500     }
3501     case kMipsI32x4SConvertI16x8Low: {
3502       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3503       Simd128Register dst = i.OutputSimd128Register();
3504       Simd128Register src = i.InputSimd128Register(0);
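           // ilvr_h duplicates each of the four low halfwords into a full
           // word; the slli_w/srai_w pair then sign-extends the low 16 bits
           // of every word. The High and I16x8 variants below use the same
           // scheme.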
3505       __ ilvr_h(kSimd128ScratchReg, src, src);
3506       __ slli_w(dst, kSimd128ScratchReg, 16);
3507       __ srai_w(dst, dst, 16);
3508       break;
3509     }
3510     case kMipsI32x4SConvertI16x8High: {
3511       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3512       Simd128Register dst = i.OutputSimd128Register();
3513       Simd128Register src = i.InputSimd128Register(0);
3514       __ ilvl_h(kSimd128ScratchReg, src, src);
3515       __ slli_w(dst, kSimd128ScratchReg, 16);
3516       __ srai_w(dst, dst, 16);
3517       break;
3518     }
3519     case kMipsI32x4UConvertI16x8Low: {
3520       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3521       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3522       __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero,
3523                 i.InputSimd128Register(0));
3524       break;
3525     }
3526     case kMipsI32x4UConvertI16x8High: {
3527       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3528       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3529       __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero,
3530                 i.InputSimd128Register(0));
3531       break;
3532     }
3533     case kMipsI16x8SConvertI8x16Low: {
3534       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3535       Simd128Register dst = i.OutputSimd128Register();
3536       Simd128Register src = i.InputSimd128Register(0);
3537       __ ilvr_b(kSimd128ScratchReg, src, src);
3538       __ slli_h(dst, kSimd128ScratchReg, 8);
3539       __ srai_h(dst, dst, 8);
3540       break;
3541     }
3542     case kMipsI16x8SConvertI8x16High: {
3543       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3544       Simd128Register dst = i.OutputSimd128Register();
3545       Simd128Register src = i.InputSimd128Register(0);
3546       __ ilvl_b(kSimd128ScratchReg, src, src);
3547       __ slli_h(dst, kSimd128ScratchReg, 8);
3548       __ srai_h(dst, dst, 8);
3549       break;
3550     }
3551     case kMipsI16x8SConvertI32x4: {
3552       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3553       Simd128Register dst = i.OutputSimd128Register();
3554       Simd128Register src0 = i.InputSimd128Register(0);
3555       Simd128Register src1 = i.InputSimd128Register(1);
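           // Each 32-bit lane is saturated to the signed 16-bit range and the
           // even halfwords of both saturated inputs are packed into dst;
           // kSimd128RegZero is merely borrowed as a second scratch register.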
3556       __ sat_s_w(kSimd128ScratchReg, src0, 15);
3557       __ sat_s_w(kSimd128RegZero, src1, 15);  // kSimd128RegZero as scratch
3558       __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
3559       break;
3560     }
3561     case kMipsI16x8UConvertI32x4: {
3562       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3563       Simd128Register dst = i.OutputSimd128Register();
3564       Simd128Register src0 = i.InputSimd128Register(0);
3565       Simd128Register src1 = i.InputSimd128Register(1);
3566       __ sat_u_w(kSimd128ScratchReg, src0, 15);
3567       __ sat_u_w(kSimd128RegZero, src1, 15);  // kSimd128RegZero as scratch
3568       __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
3569       break;
3570     }
3571     case kMipsI16x8UConvertI8x16Low: {
3572       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3573       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3574       __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
3575                 i.InputSimd128Register(0));
3576       break;
3577     }
3578     case kMipsI16x8UConvertI8x16High: {
3579       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3580       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3581       __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
3582                 i.InputSimd128Register(0));
3583       break;
3584     }
3585     case kMipsI8x16SConvertI16x8: {
3586       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3587       Simd128Register dst = i.OutputSimd128Register();
3588       Simd128Register src0 = i.InputSimd128Register(0);
3589       Simd128Register src1 = i.InputSimd128Register(1);
3590       __ sat_s_h(kSimd128ScratchReg, src0, 7);
3591       __ sat_s_h(kSimd128RegZero, src1, 7);  // kSimd128RegZero as scratch
3592       __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
3593       break;
3594     }
3595     case kMipsI8x16UConvertI16x8: {
3596       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3597       Simd128Register dst = i.OutputSimd128Register();
3598       Simd128Register src0 = i.InputSimd128Register(0);
3599       Simd128Register src1 = i.InputSimd128Register(1);
3600       __ sat_u_h(kSimd128ScratchReg, src0, 7);
3601       __ sat_u_h(kSimd128RegZero, src1, 7);  // kSimd128RegZero as scratch
3602       __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
3603       break;
3604     }
3605   }
3606   return kSuccess;
3607 }
3608 
3609 void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
3610                             Instruction* instr, FlagsCondition condition,
3611                             Label* tlabel, Label* flabel, bool fallthru) {
3612 #undef __
3613 #define __ tasm->
3614 
3615   Condition cc = kNoCondition;
3616   // MIPS does not have condition code flags, so compare and branch are
3617   // implemented differently than on other architectures. The compare
3618   // operations emit MIPS pseudo-instructions, which are handled here by
3619   // branch instructions that do the actual comparison. It is essential that
3620   // the input registers of the compare pseudo-op are not modified before
3621   // this branch op, as they are tested here.
3622 
3623   MipsOperandConverter i(gen, instr);
3624   if (instr->arch_opcode() == kMipsTst) {
3625     cc = FlagsConditionToConditionTst(condition);
3626     __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg));
3627   } else if (instr->arch_opcode() == kMipsAddOvf ||
3628              instr->arch_opcode() == kMipsSubOvf) {
3629     // Overflow occurs if the overflow register is negative.
3630     switch (condition) {
3631       case kOverflow:
3632         __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg));
3633         break;
3634       case kNotOverflow:
3635         __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg));
3636         break;
3637       default:
3638         UNSUPPORTED_COND(instr->arch_opcode(), condition);
3639     }
3640   } else if (instr->arch_opcode() == kMipsMulOvf) {
3641     // Overflow occurs if the overflow register is not zero.
3642     switch (condition) {
3643       case kOverflow:
3644         __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg));
3645         break;
3646       case kNotOverflow:
3647         __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg));
3648         break;
3649       default:
3650         UNSUPPORTED_COND(kMipsMulOvf, condition);
3651     }
3652   } else if (instr->arch_opcode() == kMipsCmp) {
3653     cc = FlagsConditionToConditionCmp(condition);
3654     __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
3655   } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
3656     cc = FlagsConditionToConditionCmp(condition);
3657     DCHECK((cc == ls) || (cc == hi));
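         // TempRegister(0) holds the boolean result of the stack pointer
         // check; for the ls condition it is inverted first so that a single
         // "branch if non-zero" serves both conditions.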
3658     if (cc == ls) {
3659       __ xori(i.TempRegister(0), i.TempRegister(0), 1);
3660     }
3661     __ Branch(tlabel, ne, i.TempRegister(0), Operand(zero_reg));
3662   } else if (instr->arch_opcode() == kMipsCmpS ||
3663              instr->arch_opcode() == kMipsCmpD) {
3664     bool predicate;
3665     FlagsConditionToConditionCmpFPU(&predicate, condition);
3666     if (predicate) {
3667       __ BranchTrueF(tlabel);
3668     } else {
3669       __ BranchFalseF(tlabel);
3670     }
3671   } else {
3672     PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
3673            instr->arch_opcode());
3674     UNIMPLEMENTED();
3675   }
3676   if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
3677 #undef __
3678 #define __ tasm()->
3679 }
3680 
3681 // Assembles branches after an instruction.
3682 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
3683   Label* tlabel = branch->true_label;
3684   Label* flabel = branch->false_label;
3685   AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
3686                          branch->fallthru);
3687 }
3688 
3689 void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
3690                                             BranchInfo* branch) {
3691   AssembleArchBranch(instr, branch);
3692 }
3693 
3694 void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
3695     RpoNumber target) {
3696   __ Branch(GetLabel(target));
3697 }
3698 
3699 #if V8_ENABLE_WEBASSEMBLY
3700 void CodeGenerator::AssembleArchTrap(Instruction* instr,
3701                                      FlagsCondition condition) {
3702   class OutOfLineTrap final : public OutOfLineCode {
3703    public:
3704     OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
3705         : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
3706 
3707     void Generate() final {
3708       MipsOperandConverter i(gen_, instr_);
3709       TrapId trap_id =
3710           static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
3711       GenerateCallToTrap(trap_id);
3712     }
3713 
3714    private:
3715     void GenerateCallToTrap(TrapId trap_id) {
3716       if (trap_id == TrapId::kInvalid) {
3717         // We cannot test calls to the runtime in cctest/test-run-wasm.
3718         // Therefore we emit a call to C here instead of a call to the runtime.
3719         // We use the context register as the scratch register, because we do
3720         // not have a context here.
3721         __ PrepareCallCFunction(0, 0, cp);
3722         __ CallCFunction(
3723             ExternalReference::wasm_call_trap_callback_for_testing(), 0);
3724         __ LeaveFrame(StackFrame::WASM);
3725         auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
3726         int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
3727         __ Drop(pop_count);
3728         __ Ret();
3729       } else {
3730         gen_->AssembleSourcePosition(instr_);
3731         // A direct call to a wasm runtime stub defined in this module.
3732         // Just encode the stub index. This will be patched when the code
3733         // is added to the native module and copied into wasm code space.
3734         __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
3735         ReferenceMap* reference_map =
3736             gen_->zone()->New<ReferenceMap>(gen_->zone());
3737         gen_->RecordSafepoint(reference_map);
3738         if (FLAG_debug_code) {
3739           __ stop();
3740         }
3741       }
3742     }
3743 
3744     Instruction* instr_;
3745     CodeGenerator* gen_;
3746   };
3747   auto ool = zone()->New<OutOfLineTrap>(this, instr);
3748   Label* tlabel = ool->entry();
3749   AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
3750 }
3751 #endif  // V8_ENABLE_WEBASSEMBLY
3752 
3753 // Assembles boolean materializations after an instruction.
3754 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
3755                                         FlagsCondition condition) {
3756   MipsOperandConverter i(this, instr);
3757 
3758   // Materialize a full 32-bit 1 or 0 value. The result register is always the
3759   // last output of the instruction.
3760   DCHECK_NE(0u, instr->OutputCount());
3761   Register result = i.OutputRegister(instr->OutputCount() - 1);
3762   Condition cc = kNoCondition;
3763   // MIPS does not have condition code flags, so compare and branch are
3764   // implemented differently than on other architectures. The compare
3765   // operations emit MIPS pseudo-instructions, which are checked and handled here.
3766 
3767   if (instr->arch_opcode() == kMipsTst) {
3768     cc = FlagsConditionToConditionTst(condition);
3769     if (cc == eq) {
3770       __ Sltu(result, kScratchReg, 1);
3771     } else {
3772       __ Sltu(result, zero_reg, kScratchReg);
3773     }
3774     return;
3775   } else if (instr->arch_opcode() == kMipsAddOvf ||
3776              instr->arch_opcode() == kMipsSubOvf) {
3777     // Overflow occurs if the overflow register is negative.
3778     __ slt(result, kScratchReg, zero_reg);
3779   } else if (instr->arch_opcode() == kMipsMulOvf) {
3780     // Overflow occurs if the overflow register is not zero.
3781     __ Sgtu(result, kScratchReg, zero_reg);
3782   } else if (instr->arch_opcode() == kMipsCmp) {
3783     cc = FlagsConditionToConditionCmp(condition);
3784     switch (cc) {
3785       case eq:
3786       case ne: {
3787         Register left = i.InputRegister(0);
3788         Operand right = i.InputOperand(1);
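             // When the immediate is zero, left is tested directly; otherwise
             // left and right are reduced to a value that is zero exactly when
             // they are equal (Addu of the negated immediate, or an Xor), and
             // Sltu turns that into 0 or 1.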
3789         if (instr->InputAt(1)->IsImmediate()) {
3790           if (is_int16(-right.immediate())) {
3791             if (right.immediate() == 0) {
3792               if (cc == eq) {
3793                 __ Sltu(result, left, 1);
3794               } else {
3795                 __ Sltu(result, zero_reg, left);
3796               }
3797             } else {
3798               __ Addu(result, left, -right.immediate());
3799               if (cc == eq) {
3800                 __ Sltu(result, result, 1);
3801               } else {
3802                 __ Sltu(result, zero_reg, result);
3803               }
3804             }
3805           } else {
3806             if (is_uint16(right.immediate())) {
3807               __ Xor(result, left, right);
3808             } else {
3809               __ li(kScratchReg, right);
3810               __ Xor(result, left, kScratchReg);
3811             }
3812             if (cc == eq) {
3813               __ Sltu(result, result, 1);
3814             } else {
3815               __ Sltu(result, zero_reg, result);
3816             }
3817           }
3818         } else {
3819           __ Xor(result, left, right);
3820           if (cc == eq) {
3821             __ Sltu(result, result, 1);
3822           } else {
3823             __ Sltu(result, zero_reg, result);
3824           }
3825         }
3826       } break;
3827       case lt:
3828       case ge: {
3829         Register left = i.InputRegister(0);
3830         Operand right = i.InputOperand(1);
3831         __ Slt(result, left, right);
3832         if (cc == ge) {
3833           __ xori(result, result, 1);
3834         }
3835       } break;
3836       case gt:
3837       case le: {
3838         Register left = i.InputRegister(1);
3839         Operand right = i.InputOperand(0);
3840         __ Slt(result, left, right);
3841         if (cc == le) {
3842           __ xori(result, result, 1);
3843         }
3844       } break;
3845       case lo:
3846       case hs: {
3847         Register left = i.InputRegister(0);
3848         Operand right = i.InputOperand(1);
3849         __ Sltu(result, left, right);
3850         if (cc == hs) {
3851           __ xori(result, result, 1);
3852         }
3853       } break;
3854       case hi:
3855       case ls: {
3856         Register left = i.InputRegister(1);
3857         Operand right = i.InputOperand(0);
3858         __ Sltu(result, left, right);
3859         if (cc == ls) {
3860           __ xori(result, result, 1);
3861         }
3862       } break;
3863       default:
3864         UNREACHABLE();
3865     }
3866     return;
3867   } else if (instr->arch_opcode() == kMipsCmpD ||
3868              instr->arch_opcode() == kMipsCmpS) {
3869     FPURegister left = i.InputOrZeroDoubleRegister(0);
3870     FPURegister right = i.InputOrZeroDoubleRegister(1);
3871     if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
3872         !__ IsDoubleZeroRegSet()) {
3873       __ Move(kDoubleRegZero, 0.0);
3874     }
3875     bool predicate;
3876     FlagsConditionToConditionCmpFPU(&predicate, condition);
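         // Pre-r6 cores expose the FP compare result as a condition flag that
         // Movf/Movt consume directly; r6 cmp.cond writes an all-ones or
         // all-zeros mask into kDoubleCompareReg, which is moved to the GPR
         // and reduced to 0 or 1 below.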
3877     if (!IsMipsArchVariant(kMips32r6)) {
3878       __ li(result, Operand(1));
3879       if (predicate) {
3880         __ Movf(result, zero_reg);
3881       } else {
3882         __ Movt(result, zero_reg);
3883       }
3884     } else {
3885       __ mfc1(result, kDoubleCompareReg);
3886       if (predicate) {
3887         __ And(result, result, 1);  // cmp returns all 1's/0's, use only LSB.
3888       } else {
3889         __ Addu(result, result, 1);  // Toggle result for not equal.
3890       }
3891     }
3892     return;
3893   } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
3894     cc = FlagsConditionToConditionCmp(condition);
3895     DCHECK((cc == ls) || (cc == hi));
3896     if (cc == ls) {
3897       __ xori(i.OutputRegister(), i.TempRegister(0), 1);
3898     }
3899     return;
3900   } else {
3901     PrintF("AssembleArchBoolean Unimplemented arch_opcode is : %d\n",
3902            instr->arch_opcode());
3903     TRACE_UNIMPL();
3904     UNIMPLEMENTED();
3905   }
3906 }
3907 
3908 void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
3909   MipsOperandConverter i(this, instr);
3910   Register input = i.InputRegister(0);
3911   std::vector<std::pair<int32_t, Label*>> cases;
3912   for (size_t index = 2; index < instr->InputCount(); index += 2) {
3913     cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
3914   }
3915   AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
3916                                       cases.data() + cases.size());
3917 }
3918 
3919 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
3920   MipsOperandConverter i(this, instr);
3921   Register input = i.InputRegister(0);
3922   size_t const case_count = instr->InputCount() - 2;
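       // Indices at or above case_count branch to the default block (input 1); the
       // unsigned hs compare also catches negative inputs.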
3923   __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
3924   __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
3925     return GetLabel(i.InputRpo(index + 2));
3926   });
3927 }
3928 
3929 void CodeGenerator::AssembleArchSelect(Instruction* instr,
3930                                        FlagsCondition condition) {
3931   UNIMPLEMENTED();
3932 }
3933 
3934 void CodeGenerator::FinishFrame(Frame* frame) {
3935   auto call_descriptor = linkage()->GetIncomingDescriptor();
3936 
3937   const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
3938   if (!saves_fpu.is_empty()) {
3939     frame->AlignSavedCalleeRegisterSlots();
3940   }
3941 
3942   if (!saves_fpu.is_empty()) {
3943     int count = saves_fpu.Count();
3944     DCHECK_EQ(kNumCalleeSavedFPU, count);
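         // Each callee-saved FPU register occupies kDoubleSize / kSystemPointerSize
         // (two) 32-bit slots on MIPS32.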
3945     frame->AllocateSavedCalleeRegisterSlots(count *
3946                                             (kDoubleSize / kSystemPointerSize));
3947   }
3948 
3949   const RegList saves = call_descriptor->CalleeSavedRegisters();
3950   if (!saves.is_empty()) {
3951     int count = saves.Count();
3952     frame->AllocateSavedCalleeRegisterSlots(count);
3953   }
3954 }
3955 
3956 void CodeGenerator::AssembleConstructFrame() {
3957   auto call_descriptor = linkage()->GetIncomingDescriptor();
3958   if (frame_access_state()->has_frame()) {
3959     if (call_descriptor->IsCFunctionCall()) {
3960 #if V8_ENABLE_WEBASSEMBLY
3961       if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
3962         __ StubPrologue(StackFrame::C_WASM_ENTRY);
3963         // Reserve stack space for saving the c_entry_fp later.
3964         __ Subu(sp, sp, Operand(kSystemPointerSize));
3965 #else
3966       // For balance.
3967       if (false) {
3968 #endif  // V8_ENABLE_WEBASSEMBLY
3969       } else {
3970         __ Push(ra, fp);
3971         __ mov(fp, sp);
3972       }
3973     } else if (call_descriptor->IsJSFunctionCall()) {
3974       __ Prologue();
3975     } else {
3976       __ StubPrologue(info()->GetOutputStackFrameType());
3977 #if V8_ENABLE_WEBASSEMBLY
3978       if (call_descriptor->IsWasmFunctionCall() ||
3979           call_descriptor->IsWasmImportWrapper() ||
3980           call_descriptor->IsWasmCapiFunction()) {
3981         __ Push(kWasmInstanceRegister);
3982       }
3983       if (call_descriptor->IsWasmCapiFunction()) {
3984         // Reserve space for saving the PC later.
3985         __ Subu(sp, sp, Operand(kSystemPointerSize));
3986       }
3987 #endif  // V8_ENABLE_WEBASSEMBLY
3988     }
3989   }
3990 
3991   int required_slots =
3992       frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
3993 
3994   if (info()->is_osr()) {
3995     // TurboFan OSR-compiled functions cannot be entered directly.
3996     __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
3997 
3998     // Unoptimized code jumps directly to this entrypoint while the unoptimized
3999     // frame is still on the stack. Optimized code uses OSR values directly from
4000     // the unoptimized frame. Thus, all that needs to be done is to allocate the
4001     // remaining stack slots.
4002     __ RecordComment("-- OSR entrypoint --");
4003     osr_pc_offset_ = __ pc_offset();
4004     required_slots -= osr_helper()->UnoptimizedFrameSlots();
4005   }
4006 
4007   const RegList saves = call_descriptor->CalleeSavedRegisters();
4008   const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
4009 
4010   if (required_slots > 0) {
4011     DCHECK(frame_access_state()->has_frame());
4012 #if V8_ENABLE_WEBASSEMBLY
4013     if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
4014       // For WebAssembly functions with big frames we have to do the stack
4015       // overflow check before we construct the frame. Otherwise we may not
4016       // have enough space on the stack to call the runtime for the stack
4017       // overflow.
4018       Label done;
4019 
4020       // If the frame is bigger than the stack, we throw the stack overflow
4021       // exception unconditionally. Thereby we can avoid the integer overflow
4022       // check in the condition code.
4023       if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
4024         __ Lw(
4025              kScratchReg,
4026              FieldMemOperand(kWasmInstanceRegister,
4027                              WasmInstanceObject::kRealStackLimitAddressOffset));
4028         __ Lw(kScratchReg, MemOperand(kScratchReg));
4029         __ Addu(kScratchReg, kScratchReg,
4030                 Operand(required_slots * kSystemPointerSize));
4031         __ Branch(&done, uge, sp, Operand(kScratchReg));
4032       }
4033 
4034       __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
4035       // The call does not return, hence we can ignore any references and just
4036       // define an empty safepoint.
4037       ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
4038       RecordSafepoint(reference_map);
4039       if (FLAG_debug_code) __ stop();
4040 
4041       __ bind(&done);
4042     }
4043 #endif  // V8_ENABLE_WEBASSEMBLY
4044   }
4045 
4046   const int returns = frame()->GetReturnSlotCount();
4047 
4048   // Skip callee-saved and return slots, which are pushed below.
4049   required_slots -= saves.Count();
4050   required_slots -= 2 * saves_fpu.Count();
4051   required_slots -= returns;
4052   if (required_slots > 0) {
4053     __ Subu(sp, sp, Operand(required_slots * kSystemPointerSize));
4054   }
4055 
4056   // Save callee-saved FPU registers.
4057   if (!saves_fpu.is_empty()) {
4058     __ MultiPushFPU(saves_fpu);
4059   }
4060 
4061   if (!saves.is_empty()) {
4062     // Save callee-saved registers.
4063     __ MultiPush(saves);
4064   }
4065 
4066   if (returns != 0) {
4067     // Create space for returns.
4068     __ Subu(sp, sp, Operand(returns * kSystemPointerSize));
4069   }
4070 }
4071 
4072 void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
4073   auto call_descriptor = linkage()->GetIncomingDescriptor();
4074 
4075   const int returns = frame()->GetReturnSlotCount();
4076   if (returns != 0) {
4077     __ Addu(sp, sp, Operand(returns * kSystemPointerSize));
4078   }
4079 
4080   // Restore GP registers.
4081   const RegList saves = call_descriptor->CalleeSavedRegisters();
4082   if (!saves.is_empty()) {
4083     __ MultiPop(saves);
4084   }
4085 
4086   // Restore FPU registers.
4087   const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
4088   if (!saves_fpu.is_empty()) {
4089     __ MultiPopFPU(saves_fpu);
4090   }
4091 
4092   MipsOperandConverter g(this, nullptr);
4093   const int parameter_slots =
4094       static_cast<int>(call_descriptor->ParameterSlotCount());
4095 
4096   // {additional_pop_count} is only greater than zero if {parameter_slots} == 0.
4097   // Check RawMachineAssembler::PopAndReturn.
4098   if (parameter_slots != 0) {
4099     if (additional_pop_count->IsImmediate()) {
4100       DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
4101     } else if (FLAG_debug_code) {
4102       __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
4103                 g.ToRegister(additional_pop_count),
4104                 Operand(static_cast<int64_t>(0)));
4105     }
4106   }
4107   // Functions with JS linkage have at least one parameter (the receiver).
4108   // If {parameter_slots} == 0, it means it is a builtin with
4109   // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
4110   // itself.
4111   const bool drop_jsargs = frame_access_state()->has_frame() &&
4112                            call_descriptor->IsJSFunctionCall() &&
4113                            parameter_slots != 0;
4114 
4115   if (call_descriptor->IsCFunctionCall()) {
4116     AssembleDeconstructFrame();
4117   } else if (frame_access_state()->has_frame()) {
4118     // Canonicalize JSFunction return sites for now unless they have a variable
4119     // number of stack slot pops.
4120     if (additional_pop_count->IsImmediate() &&
4121         g.ToConstant(additional_pop_count).ToInt32() == 0) {
4122       if (return_label_.is_bound()) {
4123         __ Branch(&return_label_);
4124         return;
4125       } else {
4126         __ bind(&return_label_);
4127       }
4128     }
4129     if (drop_jsargs) {
4130       // Get the actual argument count.
4131       __ Lw(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
4132     }
4133     AssembleDeconstructFrame();
4134   }
4135 
4136   if (drop_jsargs) {
4137     // We must pop all arguments from the stack (including the receiver). This
4138     // number of arguments is given by max(1 + argc_reg, parameter_slots).
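         // slt/movn below compute that maximum in t0: movn copies parameter_slots
         // into t0 only when t0 is smaller. Lsa then adds t0 << kSystemPointerSizeLog2
         // to sp to drop the arguments.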
4139     if (parameter_slots > 1) {
4140       __ li(kScratchReg, parameter_slots);
4141       __ slt(kScratchReg2, t0, kScratchReg);
4142       __ movn(t0, kScratchReg, kScratchReg2);
4143     }
4144     __ Lsa(sp, sp, t0, kSystemPointerSizeLog2, t0);
4145   } else if (additional_pop_count->IsImmediate()) {
4146     DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
4147     int additional_count = g.ToConstant(additional_pop_count).ToInt32();
4148     __ Drop(parameter_slots + additional_count);
4149   } else {
4150     Register pop_reg = g.ToRegister(additional_pop_count);
4151     __ Drop(parameter_slots);
4152     __ Lsa(sp, sp, pop_reg, kSystemPointerSizeLog2, pop_reg);
4153   }
4154   __ Ret();
4155 }
4156 
4157 void CodeGenerator::FinishCode() {}
4158 
4159 void CodeGenerator::PrepareForDeoptimizationExits(
4160     ZoneDeque<DeoptimizationExit*>* exits) {}
4161 
4162 void CodeGenerator::AssembleMove(InstructionOperand* source,
4163                                  InstructionOperand* destination) {
4164   MipsOperandConverter g(this, nullptr);
4165   // Dispatch on the source and destination operand kinds.  Not all
4166   // combinations are possible.
4167   if (source->IsRegister()) {
4168     DCHECK(destination->IsRegister() || destination->IsStackSlot());
4169     Register src = g.ToRegister(source);
4170     if (destination->IsRegister()) {
4171       __ mov(g.ToRegister(destination), src);
4172     } else {
4173       __ sw(src, g.ToMemOperand(destination));
4174     }
4175   } else if (source->IsStackSlot()) {
4176     DCHECK(destination->IsRegister() || destination->IsStackSlot());
4177     MemOperand src = g.ToMemOperand(source);
4178     if (destination->IsRegister()) {
4179       __ lw(g.ToRegister(destination), src);
4180     } else {
4181       Register temp = kScratchReg;
4182       __ lw(temp, src);
4183       __ sw(temp, g.ToMemOperand(destination));
4184     }
4185   } else if (source->IsConstant()) {
4186     Constant src = g.ToConstant(source);
4187     if (destination->IsRegister() || destination->IsStackSlot()) {
4188       Register dst =
4189           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
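           // Materialize the constant into the destination register directly, or into
           // the scratch register when the destination is a stack slot; the spill to
           // the slot happens after the switch.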
4190       switch (src.type()) {
4191         case Constant::kInt32:
4192 #if V8_ENABLE_WEBASSEMBLY
4193           if (RelocInfo::IsWasmReference(src.rmode()))
4194             __ li(dst, Operand(src.ToInt32(), src.rmode()));
4195           else
4196 #endif  // V8_ENABLE_WEBASSEMBLY
4197             __ li(dst, Operand(src.ToInt32()));
4198           break;
4199         case Constant::kFloat32:
4200           __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
4201           break;
4202         case Constant::kInt64:
4203           UNREACHABLE();
4204         case Constant::kFloat64:
4205           __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
4206           break;
4207         case Constant::kExternalReference:
4208           __ li(dst, src.ToExternalReference());
4209           break;
4210         case Constant::kDelayedStringConstant:
4211           __ li(dst, src.ToDelayedStringConstant());
4212           break;
4213         case Constant::kHeapObject: {
4214           Handle<HeapObject> src_object = src.ToHeapObject();
4215           RootIndex index;
4216           if (IsMaterializableFromRoot(src_object, &index)) {
4217             __ LoadRoot(dst, index);
4218           } else {
4219             __ li(dst, src_object);
4220           }
4221           break;
4222         }
4223         case Constant::kCompressedHeapObject:
4224           UNREACHABLE();
4225         case Constant::kRpoNumber:
4226           UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips.
4227       }
4228       if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
4229     } else if (src.type() == Constant::kFloat32) {
4230       if (destination->IsFPStackSlot()) {
4231         MemOperand dst = g.ToMemOperand(destination);
4232         if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
4233           __ sw(zero_reg, dst);
4234         } else {
4235           __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
4236           __ sw(kScratchReg, dst);
4237         }
4238       } else {
4239         DCHECK(destination->IsFPRegister());
4240         FloatRegister dst = g.ToSingleRegister(destination);
4241         __ Move(dst, src.ToFloat32());
4242       }
4243     } else {
4244       DCHECK_EQ(Constant::kFloat64, src.type());
4245       DoubleRegister dst = destination->IsFPRegister()
4246                                ? g.ToDoubleRegister(destination)
4247                                : kScratchDoubleReg;
4248       __ Move(dst, src.ToFloat64().value());
4249       if (destination->IsFPStackSlot()) {
4250         __ Sdc1(dst, g.ToMemOperand(destination));
4251       }
4252     }
4253   } else if (source->IsFPRegister()) {
4254     MachineRepresentation rep = LocationOperand::cast(source)->representation();
4255     if (rep == MachineRepresentation::kSimd128) {
4256       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
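           // Simd128 values live in MSA registers: move_v copies register to
           // register, st_b stores all 16 byte lanes to the stack slot.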
4257       MSARegister src = g.ToSimd128Register(source);
4258       if (destination->IsSimd128Register()) {
4259         MSARegister dst = g.ToSimd128Register(destination);
4260         __ move_v(dst, src);
4261       } else {
4262         DCHECK(destination->IsSimd128StackSlot());
4263         __ st_b(src, g.ToMemOperand(destination));
4264       }
4265     } else {
4266       FPURegister src = g.ToDoubleRegister(source);
4267       if (destination->IsFPRegister()) {
4268         FPURegister dst = g.ToDoubleRegister(destination);
4269         __ Move(dst, src);
4270       } else {
4271         DCHECK(destination->IsFPStackSlot());
4272         MachineRepresentation rep =
4273             LocationOperand::cast(source)->representation();
4274         if (rep == MachineRepresentation::kFloat64) {
4275           __ Sdc1(src, g.ToMemOperand(destination));
4276         } else if (rep == MachineRepresentation::kFloat32) {
4277           __ swc1(src, g.ToMemOperand(destination));
4278         } else {
4279           UNREACHABLE();
4280         }
4281       }
4282     }
4283   } else if (source->IsFPStackSlot()) {
4284     DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
4285     MemOperand src = g.ToMemOperand(source);
4286     MachineRepresentation rep = LocationOperand::cast(source)->representation();
4287     if (destination->IsFPRegister()) {
4288       if (rep == MachineRepresentation::kFloat64) {
4289         __ Ldc1(g.ToDoubleRegister(destination), src);
4290       } else if (rep == MachineRepresentation::kFloat32) {
4291         __ lwc1(g.ToDoubleRegister(destination), src);
4292       } else {
4293         DCHECK_EQ(MachineRepresentation::kSimd128, rep);
4294         CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
4295         __ ld_b(g.ToSimd128Register(destination), src);
4296       }
4297     } else {
4298       FPURegister temp = kScratchDoubleReg;
4299       if (rep == MachineRepresentation::kFloat64) {
4300         __ Ldc1(temp, src);
4301         __ Sdc1(temp, g.ToMemOperand(destination));
4302       } else if (rep == MachineRepresentation::kFloat32) {
4303         __ lwc1(temp, src);
4304         __ swc1(temp, g.ToMemOperand(destination));
4305       } else {
4306         DCHECK_EQ(MachineRepresentation::kSimd128, rep);
4307         CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
4308         MSARegister temp = kSimd128ScratchReg;
4309         __ ld_b(temp, src);
4310         __ st_b(temp, g.ToMemOperand(destination));
4311       }
4312     }
4313   } else {
4314     UNREACHABLE();
4315   }
4316 }
4317 
4318 void CodeGenerator::AssembleSwap(InstructionOperand* source,
4319                                  InstructionOperand* destination) {
4320   MipsOperandConverter g(this, nullptr);
4321   // Dispatch on the source and destination operand kinds.  Not all
4322   // combinations are possible.
4323   if (source->IsRegister()) {
4324     // Register-register.
4325     Register temp = kScratchReg;
4326     Register src = g.ToRegister(source);
4327     if (destination->IsRegister()) {
4328       Register dst = g.ToRegister(destination);
4329       __ Move(temp, src);
4330       __ Move(src, dst);
4331       __ Move(dst, temp);
4332     } else {
4333       DCHECK(destination->IsStackSlot());
4334       MemOperand dst = g.ToMemOperand(destination);
4335       __ mov(temp, src);
4336       __ lw(src, dst);
4337       __ sw(temp, dst);
4338     }
4339   } else if (source->IsStackSlot()) {
4340     DCHECK(destination->IsStackSlot());
4341     Register temp_0 = kScratchReg;
4342     Register temp_1 = kScratchReg2;
4343     MemOperand src = g.ToMemOperand(source);
4344     MemOperand dst = g.ToMemOperand(destination);
4345     __ lw(temp_0, src);
4346     __ lw(temp_1, dst);
4347     __ sw(temp_0, dst);
4348     __ sw(temp_1, src);
4349   } else if (source->IsFPRegister()) {
4350     if (destination->IsFPRegister()) {
4351       MachineRepresentation rep =
4352           LocationOperand::cast(source)->representation();
4353       if (rep == MachineRepresentation::kSimd128) {
4354         CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
4355         MSARegister temp = kSimd128ScratchReg;
4356         MSARegister src = g.ToSimd128Register(source);
4357         MSARegister dst = g.ToSimd128Register(destination);
4358         __ move_v(temp, src);
4359         __ move_v(src, dst);
4360         __ move_v(dst, temp);
4361       } else {
4362         FPURegister temp = kScratchDoubleReg;
4363         FPURegister src = g.ToDoubleRegister(source);
4364         FPURegister dst = g.ToDoubleRegister(destination);
4365         __ Move(temp, src);
4366         __ Move(src, dst);
4367         __ Move(dst, temp);
4368       }
4369     } else {
4370       DCHECK(destination->IsFPStackSlot());
4371       MemOperand dst = g.ToMemOperand(destination);
4372       MachineRepresentation rep =
4373           LocationOperand::cast(source)->representation();
4374       if (rep == MachineRepresentation::kFloat64) {
4375         FPURegister temp = kScratchDoubleReg;
4376         FPURegister src = g.ToDoubleRegister(source);
4377         __ Move(temp, src);
4378         __ Ldc1(src, dst);
4379         __ Sdc1(temp, dst);
4380       } else if (rep == MachineRepresentation::kFloat32) {
4381         FPURegister temp = kScratchDoubleReg;
4382         FPURegister src = g.ToFloatRegister(source);
4383         __ Move(temp, src);
4384         __ lwc1(src, dst);
4385         __ swc1(temp, dst);
4386       } else {
4387         DCHECK_EQ(MachineRepresentation::kSimd128, rep);
4388         CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
4389         MSARegister temp = kSimd128ScratchReg;
4390         MSARegister src = g.ToSimd128Register(source);
4391         __ move_v(temp, src);
4392         __ ld_b(src, dst);
4393         __ st_b(temp, dst);
4394       }
4395     }
4396   } else if (source->IsFPStackSlot()) {
4397     DCHECK(destination->IsFPStackSlot());
4398     Register temp_0 = kScratchReg;
4399     FPURegister temp_1 = kScratchDoubleReg;
4400     MemOperand src0 = g.ToMemOperand(source);
4401     MemOperand dst0 = g.ToMemOperand(destination);
4402     MachineRepresentation rep = LocationOperand::cast(source)->representation();
4403     if (rep == MachineRepresentation::kFloat64) {
4404       MemOperand src1(src0.rm(), src0.offset() + kIntSize);
4405       MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
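           // A kFloat64 stack slot spans two 32-bit words (src0/src1, dst0/dst1),
           // so the source is copied word by word through temp_0.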
4406       __ Ldc1(temp_1, dst0);  // Save destination in temp_1.
4407       __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
4408       __ sw(temp_0, dst0);
4409       __ lw(temp_0, src1);
4410       __ sw(temp_0, dst1);
4411       __ Sdc1(temp_1, src0);
4412     } else if (rep == MachineRepresentation::kFloat32) {
4413       __ lwc1(temp_1, dst0);  // Save destination in temp_1.
4414       __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
4415       __ sw(temp_0, dst0);
4416       __ swc1(temp_1, src0);
4417     } else {
4418       DCHECK_EQ(MachineRepresentation::kSimd128, rep);
4419       MemOperand src1(src0.rm(), src0.offset() + kIntSize);
4420       MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
4421       MemOperand src2(src0.rm(), src0.offset() + 2 * kIntSize);
4422       MemOperand dst2(dst0.rm(), dst0.offset() + 2 * kIntSize);
4423       MemOperand src3(src0.rm(), src0.offset() + 3 * kIntSize);
4424       MemOperand dst3(dst0.rm(), dst0.offset() + 3 * kIntSize);
4425       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
4426       MSARegister temp_1 = kSimd128ScratchReg;
4427       __ ld_b(temp_1, dst0);  // Save destination in temp_1.
4428       __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
4429       __ sw(temp_0, dst0);
4430       __ lw(temp_0, src1);
4431       __ sw(temp_0, dst1);
4432       __ lw(temp_0, src2);
4433       __ sw(temp_0, dst2);
4434       __ lw(temp_0, src3);
4435       __ sw(temp_0, dst3);
4436       __ st_b(temp_1, src0);
4437     }
4438   } else {
4439     // No other combinations are possible.
4440     UNREACHABLE();
4441   }
4442 }
4443 
4444 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
4445   // On 32-bit MIPS we emit the jump tables inline.
4446   UNREACHABLE();
4447 }
4448 
4449 #undef __
4450 #undef ASSEMBLE_F64X2_ARITHMETIC_BINOP
4451 #undef ASSEMBLE_SIMD_EXTENDED_MULTIPLY
4452 
4453 }  // namespace compiler
4454 }  // namespace internal
4455 }  // namespace v8
4456