// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/mips64/constants-mips64.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/memory-chunk.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {
namespace compiler {

#define __ tasm()->
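// The "__" shorthand makes the emitter code below read like an assembly
// listing: each "__ op(...)" expands to "tasm()->op(...)".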

// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg)                                                      \
  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
         __LINE__)

#define TRACE_UNIMPL()                                                       \
  PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
         __LINE__)

// Adds MIPS-specific methods to convert InstructionOperands.
class MipsOperandConverter final : public InstructionOperandConverter {
 public:
  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  FloatRegister OutputSingleRegister(size_t index = 0) {
    return ToSingleRegister(instr_->OutputAt(index));
  }

  FloatRegister InputSingleRegister(size_t index) {
    return ToSingleRegister(instr_->InputAt(index));
  }

  FloatRegister ToSingleRegister(InstructionOperand* op) {
    // The single (float) and double register namespaces are the same on
    // MIPS; both are typedefs of FPURegister.
    return ToDoubleRegister(op);
  }

  Register InputOrZeroRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) {
      DCHECK_EQ(0, InputInt32(index));
      return zero_reg;
    }
    return InputRegister(index);
  }

  DoubleRegister InputOrZeroDoubleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputDoubleRegister(index);
  }

  DoubleRegister InputOrZeroSingleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputSingleRegister(index);
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat32:
        return Operand::EmbeddedNumber(constant.ToFloat32());
      case Constant::kFloat64:
        return Operand::EmbeddedNumber(constant.ToFloat64().value());
      case Constant::kExternalReference:
      case Constant::kCompressedHeapObject:
      case Constant::kHeapObject:
        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
        //    maybe not done on arm due to const pool ??
        break;
      case Constant::kDelayedStringConstant:
        return Operand::EmbeddedStringConstant(
            constant.ToDelayedStringConstant());
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
    }
    UNREACHABLE();
  }

  Operand InputOperand(size_t index) {
    InstructionOperand* op = instr_->InputAt(index);
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return InputImmediate(index);
  }

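  // MemoryOperand decodes the instruction's addressing mode: kMode_MRI is
  // [base register + immediate offset]; kMode_MRR ([base + index register])
  // is not implemented here yet.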
  MemOperand MemoryOperand(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        // TODO(plind): r6 address mode, to be implemented ...
        UNREACHABLE();
    }
    UNREACHABLE();
  }

  MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};

static inline bool HasRegisterInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsRegister();
}

namespace {

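// Out-of-line code emitting the record-write barrier slow path; the inline
// fast path in AssembleArchInstruction branches here only when the page
// flags indicate that the store may need to be recorded for the GC.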
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode, StubCallMode stub_mode)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
#if V8_ENABLE_WEBASSEMBLY
        stub_mode_(stub_mode),
#endif  // V8_ENABLE_WEBASSEMBLY
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        zone_(gen->zone()) {
    DCHECK(!AreAliased(object, index, scratch0, scratch1));
    DCHECK(!AreAliased(value, index, scratch0, scratch1));
  }

  void Generate() final {
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    __ Daddu(scratch1_, object_, index_);
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ||
                FLAG_use_full_record_write_builtin
            ? RememberedSetAction::kEmit
            : RememberedSetAction::kOmit;
    SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
                                            ? SaveFPRegsMode::kSave
                                            : SaveFPRegsMode::kIgnore;
    if (must_save_lr_) {
      // We need to save and restore ra if the frame was elided.
      __ Push(ra);
    }
    if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
      __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
#if V8_ENABLE_WEBASSEMBLY
    } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      // A direct call to a wasm runtime stub defined in this module.
      // Just encode the stub index. This will be patched when the code
      // is added to the native module and copied into wasm code space.
      __ CallRecordWriteStubSaveRegisters(object_, scratch1_,
                                          remembered_set_action, save_fp_mode,
                                          StubCallMode::kCallWasmRuntimeStub);
#endif  // V8_ENABLE_WEBASSEMBLY
    } else {
      __ CallRecordWriteStubSaveRegisters(object_, scratch1_,
                                          remembered_set_action, save_fp_mode);
    }
    if (must_save_lr_) {
      __ Pop(ra);
    }
  }

 private:
  Register const object_;
  Register const index_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
#if V8_ENABLE_WEBASSEMBLY
  StubCallMode const stub_mode_;
#endif  // V8_ENABLE_WEBASSEMBLY
  bool must_save_lr_;
  Zone* zone_;
};

#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T)                 \
  class ool_name final : public OutOfLineCode {                      \
   public:                                                           \
    ool_name(CodeGenerator* gen, T dst, T src1, T src2)              \
        : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
                                                                     \
    void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); }  \
                                                                     \
   private:                                                          \
    T const dst_;                                                    \
    T const src1_;                                                   \
    T const src2_;                                                   \
  }

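// Each instantiation below defines an out-of-line class whose Generate()
// forwards to the matching TurboAssembler helper (e.g. OutOfLineFloat32Max
// calls Float32MaxOutOfLine), providing the slow path for the float min/max
// operations.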
CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister);

#undef CREATE_OOL_CLASS

Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
    default:
      break;
  }
  UNREACHABLE();
}

Condition FlagsConditionToConditionTst(FlagsCondition condition) {
  switch (condition) {
    case kNotEqual:
      return ne;
    case kEqual:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
}

Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
  switch (condition) {
    case kOverflow:
      return ne;
    case kNotOverflow:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
}

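// Maps a FlagsCondition to an FPU compare condition. MIPS FPU compares set a
// condition flag rather than a register; *predicate tells the caller whether
// to branch on the flag being set (true) or cleared (false), e.g. kNotEqual
// is encoded as the negation of EQ.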
FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
                                             FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      *predicate = true;
      return EQ;
    case kNotEqual:
      *predicate = false;
      return EQ;
    case kUnsignedLessThan:
      *predicate = true;
      return OLT;
    case kUnsignedGreaterThanOrEqual:
      *predicate = false;
      return OLT;
    case kUnsignedLessThanOrEqual:
      *predicate = true;
      return OLE;
    case kUnsignedGreaterThan:
      *predicate = false;
      return OLE;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      *predicate = true;
      break;
    default:
      *predicate = true;
      break;
  }
  UNREACHABLE();
}

}  // namespace

#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)          \
  do {                                                   \
    __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
    __ sync();                                           \
  } while (0)

#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)               \
  do {                                                         \
    __ sync();                                                 \
    __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
    __ sync();                                                 \
  } while (0)

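// Emits a load-linked/store-conditional retry loop: load the old value,
// apply bin_instr, and attempt the store; store_conditional writes 0 on
// failure, so BranchShort loops until the update lands. The sync()
// instructions provide the required memory barriers.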
#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr)       \
  do {                                                                         \
    Label binop;                                                               \
    __ Daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ sync();                                                                 \
    __ bind(&binop);                                                           \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));     \
    __ bin_instr(i.TempRegister(1), i.OutputRegister(0),                       \
                 Operand(i.InputRegister(2)));                                 \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));          \
    __ sync();                                                                 \
  } while (0)

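// Sub-word variant: ll/sc only work on naturally aligned words, so the
// address is rounded down to the enclosing 32- or 64-bit cell, the byte
// offset is turned into a bit shift, and ExtractBits/InsertBits update only
// the addressed "size"-bit field within the loaded cell.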
#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \
                                  size, bin_instr, representation)             \
  do {                                                                         \
    Label binop;                                                               \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    if (representation == 32) {                                                \
      __ andi(i.TempRegister(3), i.TempRegister(0), 0x3);                      \
    } else {                                                                   \
      DCHECK_EQ(representation, 64);                                           \
      __ andi(i.TempRegister(3), i.TempRegister(0), 0x7);                      \
    }                                                                          \
    __ Dsubu(i.TempRegister(0), i.TempRegister(0),                             \
             Operand(i.TempRegister(3)));                                      \
    __ sll(i.TempRegister(3), i.TempRegister(3), 3);                           \
    __ sync();                                                                 \
    __ bind(&binop);                                                           \
    __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));       \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3),  \
                   size, sign_extend);                                         \
    __ bin_instr(i.TempRegister(2), i.OutputRegister(0),                       \
                 Operand(i.InputRegister(2)));                                 \
    __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3),     \
                  size);                                                       \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));          \
    __ sync();                                                                 \
  } while (0)

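// Atomic exchange via the same ll/sc retry loop: the new value (input 2)
// replaces the old one, and the old value is returned in the output
// register.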
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional)       \
  do {                                                                         \
    Label exchange;                                                            \
    __ sync();                                                                 \
    __ bind(&exchange);                                                        \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));     \
    __ mov(i.TempRegister(1), i.InputRegister(2));                             \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg));       \
    __ sync();                                                                 \
  } while (0)

#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(                                  \
    load_linked, store_conditional, sign_extend, size, representation)         \
  do {                                                                         \
    Label exchange;                                                            \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    if (representation == 32) {                                                \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x3);                      \
    } else {                                                                   \
      DCHECK_EQ(representation, 64);                                           \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x7);                      \
    }                                                                          \
    __ Dsubu(i.TempRegister(0), i.TempRegister(0),                             \
             Operand(i.TempRegister(1)));                                      \
    __ sll(i.TempRegister(1), i.TempRegister(1), 3);                           \
    __ sync();                                                                 \
    __ bind(&exchange);                                                        \
    __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));       \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1),  \
                   size, sign_extend);                                         \
    __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1),    \
                  size);                                                       \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg));       \
    __ sync();                                                                 \
  } while (0)

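// Atomic compare-exchange: load the current value, branch to "exit" if it
// differs from the expected value (input 2), otherwise try to store the new
// value (input 3) and retry until the store-conditional succeeds.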
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked,                  \
                                                 store_conditional)            \
  do {                                                                         \
    Label compareExchange;                                                     \
    Label exit;                                                                \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ sync();                                                                 \
    __ bind(&compareExchange);                                                 \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));     \
    __ BranchShort(&exit, ne, i.InputRegister(2),                              \
                   Operand(i.OutputRegister(0)));                              \
    __ mov(i.TempRegister(2), i.InputRegister(3));                             \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&compareExchange, eq, i.TempRegister(2),                    \
                   Operand(zero_reg));                                         \
    __ bind(&exit);                                                            \
    __ sync();                                                                 \
  } while (0)

#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(                          \
    load_linked, store_conditional, sign_extend, size, representation)         \
  do {                                                                         \
    Label compareExchange;                                                     \
    Label exit;                                                                \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    if (representation == 32) {                                                \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x3);                      \
    } else {                                                                   \
      DCHECK_EQ(representation, 64);                                           \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x7);                      \
    }                                                                          \
    __ Dsubu(i.TempRegister(0), i.TempRegister(0),                             \
             Operand(i.TempRegister(1)));                                      \
    __ sll(i.TempRegister(1), i.TempRegister(1), 3);                           \
    __ sync();                                                                 \
    __ bind(&compareExchange);                                                 \
    __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));       \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1),  \
                   size, sign_extend);                                         \
    __ ExtractBits(i.TempRegister(2), i.InputRegister(2), zero_reg, size,      \
                   sign_extend);                                               \
    __ BranchShort(&exit, ne, i.TempRegister(2),                               \
                   Operand(i.OutputRegister(0)));                              \
    __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1),    \
                  size);                                                       \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&compareExchange, eq, i.TempRegister(2),                    \
                   Operand(zero_reg));                                         \
    __ bind(&exit);                                                            \
    __ sync();                                                                 \
  } while (0)

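// The IEEE754 helpers below have no direct MIPS instructions; they are
// emitted as calls into V8's C++ ieee754 routines through the regular C
// calling convention (one double parameter for unops, two for binops).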
#define ASSEMBLE_IEEE754_BINOP(name)                                        \
  do {                                                                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
    __ PrepareCallCFunction(0, 2, kScratchReg);                             \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                       \
                            i.InputDoubleRegister(1));                      \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
    /* Move the result into the double result register. */                  \
    __ MovFromFloatResult(i.OutputDoubleRegister());                        \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name)                                         \
  do {                                                                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
    __ PrepareCallCFunction(0, 1, kScratchReg);                             \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                       \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
    /* Move the result into the double result register. */                  \
    __ MovFromFloatResult(i.OutputDoubleRegister());                        \
  } while (0)

#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op)                     \
  do {                                                          \
    __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
          i.InputSimd128Register(1));                           \
  } while (0)

void CodeGenerator::AssembleDeconstructFrame() {
  __ mov(sp, fp);
  __ Pop(ra, fp);
}

void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}

namespace {

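// Adjusts sp around a tail call so that the first unused slot sits exactly
// new_slot_above_sp slots above the stack pointer; the stack is grown when
// needed and shrunk only if allow_shrinkage is set, keeping the recorded SP
// delta in sync.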
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
                                   FrameAccessState* state,
                                   int new_slot_above_sp,
                                   bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
                          StandardFrameConstants::kFixedSlotCountAboveFp;
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    tasm->Dsubu(sp, sp, stack_slot_delta * kSystemPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    tasm->Daddu(sp, sp, -stack_slot_delta * kSystemPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
  }
}

}  // namespace

void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_slot_offset) {
  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
                                first_unused_slot_offset, false);
}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_slot_offset) {
  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
                                first_unused_slot_offset);
}

// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
  __ ComputeCodeStartAddress(kScratchReg);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
            kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
}

// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
//    1. read from memory the word that contains that bit, which can be found in
//       the flags in the referenced {CodeDataContainer} object;
//    2. test kMarkedForDeoptimizationBit in those flags; and
//    3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
  __ Ld(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
  __ Lw(kScratchReg,
        FieldMemOperand(kScratchReg,
                        CodeDataContainer::kKindSpecificFlagsOffset));
  __ And(kScratchReg, kScratchReg,
         Operand(1 << Code::kMarkedForDeoptimizationBit));
  __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
          RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}

// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {
  MipsOperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        DCHECK_IMPLIES(
            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
            reg == kJavaScriptCallCodeStartRegister);
        __ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
        __ Call(reg);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallBuiltinPointer: {
      DCHECK(!instr->InputAt(0)->IsImmediate());
      Register builtin_index = i.InputRegister(0);
      __ CallBuiltinByIndex(builtin_index);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
#if V8_ENABLE_WEBASSEMBLY
    case kArchCallWasmFunction: {
      if (instr->InputAt(0)->IsImmediate()) {
        Constant constant = i.ToConstant(instr->InputAt(0));
        Address wasm_code = static_cast<Address>(constant.ToInt64());
        __ Call(wasm_code, constant.rmode());
      } else {
        __ daddiu(kScratchReg, i.InputRegister(0), 0);
        __ Call(kScratchReg);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallWasm: {
      if (instr->InputAt(0)->IsImmediate()) {
        Constant constant = i.ToConstant(instr->InputAt(0));
        Address wasm_code = static_cast<Address>(constant.ToInt64());
        __ Jump(wasm_code, constant.rmode());
      } else {
        __ daddiu(kScratchReg, i.InputRegister(0), 0);
        __ Jump(kScratchReg);
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
#endif  // V8_ENABLE_WEBASSEMBLY
    case kArchTailCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
        __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        DCHECK_IMPLIES(
            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
            reg == kJavaScriptCallCodeStartRegister);
        __ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
        __ Jump(reg);
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallAddress: {
      CHECK(!instr->InputAt(0)->IsImmediate());
      Register reg = i.InputRegister(0);
      DCHECK_IMPLIES(
          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
          reg == kJavaScriptCallCodeStartRegister);
      __ Jump(reg);
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
                  Operand(kScratchReg));
      }
      static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
      __ Ld(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
      __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Call(a2);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchSaveCallerRegisters: {
      fp_mode_ =
          static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
      DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
             fp_mode_ == SaveFPRegsMode::kSave);
      // kReturnRegister0 should have been saved before entering the stub.
      int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
      DCHECK(IsAligned(bytes, kSystemPointerSize));
      DCHECK_EQ(0, frame_access_state()->sp_delta());
      frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
      DCHECK(!caller_registers_saved_);
      caller_registers_saved_ = true;
      break;
    }
    case kArchRestoreCallerRegisters: {
      DCHECK(fp_mode_ ==
             static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
      DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
             fp_mode_ == SaveFPRegsMode::kSave);
      // Don't overwrite the returned value.
      int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
      frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
      DCHECK_EQ(0, frame_access_state()->sp_delta());
      DCHECK(caller_registers_saved_);
      caller_registers_saved_ = false;
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall();
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
#if V8_ENABLE_WEBASSEMBLY
      Label start_call;
      bool isWasmCapiFunction =
          linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
      // Distance in bytes from start_call to the return address.
      int offset = __ root_array_available() ? 64 : 112;
#endif  // V8_ENABLE_WEBASSEMBLY
#if V8_HOST_ARCH_MIPS64
      if (FLAG_debug_code) {
        offset += 16;
      }
#endif
#if V8_ENABLE_WEBASSEMBLY
      if (isWasmCapiFunction) {
        // Put the return address in a stack slot.
        __ mov(kScratchReg, ra);
        __ bind(&start_call);
        __ nal();
        __ nop();
        __ Daddu(ra, ra, offset - 8);  // 8 = nop + nal
        __ sd(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
        __ mov(ra, kScratchReg);
      }
#endif  // V8_ENABLE_WEBASSEMBLY
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
#if V8_ENABLE_WEBASSEMBLY
      if (isWasmCapiFunction) {
        CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
        RecordSafepoint(instr->reference_map());
      }
#endif  // V8_ENABLE_WEBASSEMBLY
      frame_access_state()->SetFrameAccessToDefault();
      // Ideally, we should decrement SP delta to match the change of stack
      // pointer in CallCFunction. However, for certain architectures (e.g.
      // ARM), there may be a stricter alignment requirement, causing the old
      // SP to be saved on the stack. In those cases, we cannot calculate the
      // SP delta statically.
      frame_access_state()->ClearSPDelta();
      if (caller_registers_saved_) {
        // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
        // Here, we assume the sequence to be:
        //   kArchSaveCallerRegisters;
        //   kArchCallCFunction;
        //   kArchRestoreCallerRegisters;
        int bytes =
            __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
        frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
      }
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchBinarySearchSwitch:
      AssembleArchBinarySearchSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchAbortCSADcheck:
      DCHECK(i.InputRegister(0) == a0);
      {
        // We don't actually want to generate a pile of code for this, so just
        // claim there is a stack frame, without generating one.
        FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
        __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
                RelocInfo::CODE_TARGET);
      }
      __ stop();
      break;
    case kArchDebugBreak:
      __ DebugBreak();
      break;
    case kArchComment:
      __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      DeoptimizationExit* exit =
          BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore());
      __ Branch(exit->label());
      break;
    }
    case kArchRet:
      AssembleReturn(instr->InputAt(0));
      break;
    case kArchStackPointerGreaterThan: {
      Register lhs_register = sp;
      uint32_t offset;
      if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
        lhs_register = i.TempRegister(1);
        __ Dsubu(lhs_register, sp, offset);
      }
      __ Sltu(i.TempRegister(0), i.InputRegister(0), lhs_register);
      break;
    }
    case kArchStackCheckOffset:
      __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ Ld(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ mov(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                           i.InputDoubleRegister(0), DetermineStubCallMode());
      break;
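    // The write-barrier cases below store the value first and then run the
    // barrier: smi values (when the mode allows) and stores to pages whose
    // flags are not interesting skip the OutOfLineRecordWrite slow path.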
    case kArchStoreWithWriteBarrier:  // Fall through.
    case kArchAtomicStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = zone()->New<OutOfLineRecordWrite>(this, object, index, value,
                                                   scratch0, scratch1, mode,
                                                   DetermineStubCallMode());
      __ Daddu(kScratchReg, object, index);
      if (arch_opcode == kArchStoreWithWriteBarrier) {
        __ Sd(value, MemOperand(kScratchReg));
      } else {
        DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
        __ sync();
        __ Sd(value, MemOperand(kScratchReg));
        __ sync();
      }
      if (mode > RecordWriteMode::kValueIsPointer) {
        __ JumpIfSmi(value, ool->exit());
      }
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      Register base_reg = offset.from_stack_pointer() ? sp : fp;
      __ Daddu(i.OutputRegister(), base_reg, Operand(offset.offset()));
      if (FLAG_debug_code) {
        // Verify that the output_register is properly aligned
        __ And(kScratchReg, i.OutputRegister(),
               Operand(kSystemPointerSize - 1));
        __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
                  Operand(zero_reg));
      }
      break;
    }
    case kIeee754Float64Acos:
      ASSEMBLE_IEEE754_UNOP(acos);
      break;
    case kIeee754Float64Acosh:
      ASSEMBLE_IEEE754_UNOP(acosh);
      break;
    case kIeee754Float64Asin:
      ASSEMBLE_IEEE754_UNOP(asin);
      break;
    case kIeee754Float64Asinh:
      ASSEMBLE_IEEE754_UNOP(asinh);
      break;
    case kIeee754Float64Atan:
      ASSEMBLE_IEEE754_UNOP(atan);
      break;
    case kIeee754Float64Atanh:
      ASSEMBLE_IEEE754_UNOP(atanh);
      break;
    case kIeee754Float64Atan2:
      ASSEMBLE_IEEE754_BINOP(atan2);
      break;
    case kIeee754Float64Cos:
      ASSEMBLE_IEEE754_UNOP(cos);
      break;
    case kIeee754Float64Cosh:
      ASSEMBLE_IEEE754_UNOP(cosh);
      break;
    case kIeee754Float64Cbrt:
      ASSEMBLE_IEEE754_UNOP(cbrt);
      break;
    case kIeee754Float64Exp:
      ASSEMBLE_IEEE754_UNOP(exp);
      break;
    case kIeee754Float64Expm1:
      ASSEMBLE_IEEE754_UNOP(expm1);
      break;
    case kIeee754Float64Log:
      ASSEMBLE_IEEE754_UNOP(log);
      break;
    case kIeee754Float64Log1p:
      ASSEMBLE_IEEE754_UNOP(log1p);
      break;
    case kIeee754Float64Log2:
      ASSEMBLE_IEEE754_UNOP(log2);
      break;
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Pow:
      ASSEMBLE_IEEE754_BINOP(pow);
      break;
    case kIeee754Float64Sin:
      ASSEMBLE_IEEE754_UNOP(sin);
      break;
    case kIeee754Float64Sinh:
      ASSEMBLE_IEEE754_UNOP(sinh);
      break;
    case kIeee754Float64Tan:
      ASSEMBLE_IEEE754_UNOP(tan);
      break;
    case kIeee754Float64Tanh:
      ASSEMBLE_IEEE754_UNOP(tanh);
      break;
    case kMips64Add:
      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dadd:
      __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DaddOvf:
      __ DaddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
                      kScratchReg);
      break;
    case kMips64Sub:
      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dsub:
      __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DsubOvf:
      __ DsubOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
                      kScratchReg);
      break;
    case kMips64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulOvf:
      __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
                     kScratchReg);
      break;
    case kMips64MulHigh:
      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulHighU:
      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DMulHigh:
      __ Dmulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Div:
      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64DivU:
      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64Mod:
      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64ModU:
      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dmul:
      __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Ddiv:
      __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64DdivU:
      __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64Dmod:
      __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DmodU:
      __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dlsa:
      DCHECK(instr->InputAt(2)->IsImmediate());
      __ Dlsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputInt8(2));
      break;
    case kMips64Lsa:
      DCHECK(instr->InputAt(2)->IsImmediate());
      __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputInt8(2));
      break;
    case kMips64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64And32:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Or:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Or32:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Nor:
      if (instr->InputAt(1)->IsRegister()) {
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK_EQ(0, i.InputOperand(1).immediate());
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMips64Nor32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK_EQ(0, i.InputOperand(1).immediate());
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMips64Xor:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Xor32:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
      break;
    case kMips64Clz:
      __ Clz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Dclz:
      __ dclz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Ctz: {
      Register src = i.InputRegister(0);
      Register dst = i.OutputRegister();
      __ Ctz(dst, src);
    } break;
    case kMips64Dctz: {
      Register src = i.InputRegister(0);
      Register dst = i.OutputRegister();
      __ Dctz(dst, src);
    } break;
    case kMips64Popcnt: {
      Register src = i.InputRegister(0);
      Register dst = i.OutputRegister();
      __ Popcnt(dst, src);
    } break;
    case kMips64Dpopcnt: {
      Register src = i.InputRegister(0);
      Register dst = i.OutputRegister();
      __ Dpopcnt(dst, src);
    } break;
    case kMips64Shl:
      if (instr->InputAt(1)->IsRegister()) {
        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sll(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Shr:
      if (instr->InputAt(1)->IsRegister()) {
        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ srl(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Sar:
      if (instr->InputAt(1)->IsRegister()) {
        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sra(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Ext:
      __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
             i.InputInt8(2));
      break;
    case kMips64Ins:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
               i.InputInt8(2));
      }
      break;
    case kMips64Dext: {
      __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      break;
    }
    case kMips64Dins:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Dins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Dins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                i.InputInt8(2));
      }
      break;
    case kMips64Dshl:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsll(i.OutputRegister(), i.InputRegister(0),
                  static_cast<uint16_t>(imm));
        } else {
          __ dsll32(i.OutputRegister(), i.InputRegister(0),
                    static_cast<uint16_t>(imm - 32));
        }
      }
      break;
    case kMips64Dshr:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsrl(i.OutputRegister(), i.InputRegister(0),
                  static_cast<uint16_t>(imm));
        } else {
          __ dsrl32(i.OutputRegister(), i.InputRegister(0),
                    static_cast<uint16_t>(imm - 32));
        }
      }
      break;
    case kMips64Dsar:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
    case kMips64Ror:
      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dror:
      __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Tst:
      __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Cmp:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Mov:
      // TODO(plind): Should we combine mov/li like this, or use separate instr?
      //    - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
      if (HasRegisterInput(instr, 0)) {
        __ mov(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ li(i.OutputRegister(), i.InputOperand(0));
      }
      break;

1208     case kMips64CmpS: {
1209       FPURegister left = i.InputOrZeroSingleRegister(0);
1210       FPURegister right = i.InputOrZeroSingleRegister(1);
1211       bool predicate;
1212       FPUCondition cc =
1213           FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
1214 
1215       if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
1216           !__ IsDoubleZeroRegSet()) {
1217         __ Move(kDoubleRegZero, 0.0);
1218       }
1219 
1220       __ CompareF32(cc, left, right);
1221     } break;
1222     case kMips64AddS:
1223       // TODO(plind): add special case: combine mult & add.
1224       __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1225                i.InputDoubleRegister(1));
1226       break;
1227     case kMips64SubS:
1228       __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1229                i.InputDoubleRegister(1));
1230       break;
1231     case kMips64MulS:
1232       // TODO(plind): add special case: right op is -1.0, see arm port.
1233       __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1234                i.InputDoubleRegister(1));
1235       break;
1236     case kMips64DivS:
1237       __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1238                i.InputDoubleRegister(1));
1239       break;
1240     case kMips64AbsS:
1241       if (kArchVariant == kMips64r6) {
1242         __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1243       } else {
1244         __ mfc1(kScratchReg, i.InputSingleRegister(0));
1245         __ Dins(kScratchReg, zero_reg, 31, 1);
1246         __ mtc1(kScratchReg, i.OutputSingleRegister());
1247       }
1248       break;
1249     case kMips64NegS:
1250       __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1251       break;
1252     case kMips64SqrtS: {
1253       __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1254       break;
1255     }
1256     case kMips64MaxS:
1257       __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1258                i.InputDoubleRegister(1));
1259       break;
1260     case kMips64MinS:
1261       __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1262                i.InputDoubleRegister(1));
1263       break;
1264     case kMips64CmpD: {
1265       FPURegister left = i.InputOrZeroDoubleRegister(0);
1266       FPURegister right = i.InputOrZeroDoubleRegister(1);
1267       bool predicate;
1268       FPUCondition cc =
1269           FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
1270       if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
1271           !__ IsDoubleZeroRegSet()) {
1272         __ Move(kDoubleRegZero, 0.0);
1273       }
1274       __ CompareF64(cc, left, right);
1275     } break;
1276     case kMips64AddD:
1277       // TODO(plind): add special case: combine mult & add.
1278       __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1279                i.InputDoubleRegister(1));
1280       break;
1281     case kMips64SubD:
1282       __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1283                i.InputDoubleRegister(1));
1284       break;
1285     case kMips64MulD:
1286       // TODO(plind): add special case: right op is -1.0, see arm port.
1287       __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1288                i.InputDoubleRegister(1));
1289       break;
1290     case kMips64DivD:
1291       __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1292                i.InputDoubleRegister(1));
1293       break;
1294     case kMips64ModD: {
1295       // TODO(bmeurer): We should really get rid of this special instruction,
1296       // and generate a CallAddress instruction instead.
1297       FrameScope scope(tasm(), StackFrame::MANUAL);
1298       __ PrepareCallCFunction(0, 2, kScratchReg);
1299       __ MovToFloatParameters(i.InputDoubleRegister(0),
1300                               i.InputDoubleRegister(1));
1301       __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
1302       // Move the result to the double result register.
1303       __ MovFromFloatResult(i.OutputDoubleRegister());
1304       break;
1305     }
1306     case kMips64AbsD:
1307       if (kArchVariant == kMips64r6) {
1308         __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1309       } else {
1310         __ dmfc1(kScratchReg, i.InputDoubleRegister(0));
1311         __ Dins(kScratchReg, zero_reg, 63, 1);
1312         __ dmtc1(kScratchReg, i.OutputDoubleRegister());
1313       }
1314       break;
1315     case kMips64NegD:
1316       __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1317       break;
1318     case kMips64SqrtD: {
1319       __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1320       break;
1321     }
1322     case kMips64MaxD:
1323       __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1324                i.InputDoubleRegister(1));
1325       break;
1326     case kMips64MinD:
1327       __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1328                i.InputDoubleRegister(1));
1329       break;
1330     case kMips64Float64RoundDown: {
1331       __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1332       break;
1333     }
1334     case kMips64Float32RoundDown: {
1335       __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1336       break;
1337     }
1338     case kMips64Float64RoundTruncate: {
1339       __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1340       break;
1341     }
1342     case kMips64Float32RoundTruncate: {
1343       __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1344       break;
1345     }
1346     case kMips64Float64RoundUp: {
1347       __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1348       break;
1349     }
1350     case kMips64Float32RoundUp: {
1351       __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1352       break;
1353     }
1354     case kMips64Float64RoundTiesEven: {
1355       __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1356       break;
1357     }
1358     case kMips64Float32RoundTiesEven: {
1359       __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1360       break;
1361     }
1362     case kMips64Float32Max: {
1363       FPURegister dst = i.OutputSingleRegister();
1364       FPURegister src1 = i.InputSingleRegister(0);
1365       FPURegister src2 = i.InputSingleRegister(1);
1366       auto ool = zone()->New<OutOfLineFloat32Max>(this, dst, src1, src2);
1367       __ Float32Max(dst, src1, src2, ool->entry());
1368       __ bind(ool->exit());
1369       break;
1370     }
1371     case kMips64Float64Max: {
1372       FPURegister dst = i.OutputDoubleRegister();
1373       FPURegister src1 = i.InputDoubleRegister(0);
1374       FPURegister src2 = i.InputDoubleRegister(1);
1375       auto ool = zone()->New<OutOfLineFloat64Max>(this, dst, src1, src2);
1376       __ Float64Max(dst, src1, src2, ool->entry());
1377       __ bind(ool->exit());
1378       break;
1379     }
1380     case kMips64Float32Min: {
1381       FPURegister dst = i.OutputSingleRegister();
1382       FPURegister src1 = i.InputSingleRegister(0);
1383       FPURegister src2 = i.InputSingleRegister(1);
1384       auto ool = zone()->New<OutOfLineFloat32Min>(this, dst, src1, src2);
1385       __ Float32Min(dst, src1, src2, ool->entry());
1386       __ bind(ool->exit());
1387       break;
1388     }
1389     case kMips64Float64Min: {
1390       FPURegister dst = i.OutputDoubleRegister();
1391       FPURegister src1 = i.InputDoubleRegister(0);
1392       FPURegister src2 = i.InputDoubleRegister(1);
1393       auto ool = zone()->New<OutOfLineFloat64Min>(this, dst, src1, src2);
1394       __ Float64Min(dst, src1, src2, ool->entry());
1395       __ bind(ool->exit());
1396       break;
1397     }
1398     case kMips64Float64SilenceNaN:
1399       __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1400       break;
1401     case kMips64CvtSD:
1402       __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
1403       break;
1404     case kMips64CvtDS:
1405       __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
1406       break;
1407     case kMips64CvtDW: {
1408       FPURegister scratch = kScratchDoubleReg;
1409       __ mtc1(i.InputRegister(0), scratch);
1410       __ cvt_d_w(i.OutputDoubleRegister(), scratch);
1411       break;
1412     }
1413     case kMips64CvtSW: {
1414       FPURegister scratch = kScratchDoubleReg;
1415       __ mtc1(i.InputRegister(0), scratch);
1416       __ cvt_s_w(i.OutputDoubleRegister(), scratch);
1417       break;
1418     }
1419     case kMips64CvtSUw: {
1420       __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
1421       break;
1422     }
1423     case kMips64CvtSL: {
1424       FPURegister scratch = kScratchDoubleReg;
1425       __ dmtc1(i.InputRegister(0), scratch);
1426       __ cvt_s_l(i.OutputDoubleRegister(), scratch);
1427       break;
1428     }
1429     case kMips64CvtDL: {
1430       FPURegister scratch = kScratchDoubleReg;
1431       __ dmtc1(i.InputRegister(0), scratch);
1432       __ cvt_d_l(i.OutputDoubleRegister(), scratch);
1433       break;
1434     }
1435     case kMips64CvtDUw: {
1436       __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
1437       break;
1438     }
1439     case kMips64CvtDUl: {
1440       __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
1441       break;
1442     }
1443     case kMips64CvtSUl: {
1444       __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
1445       break;
1446     }
1447     case kMips64FloorWD: {
1448       FPURegister scratch = kScratchDoubleReg;
1449       __ floor_w_d(scratch, i.InputDoubleRegister(0));
1450       __ mfc1(i.OutputRegister(), scratch);
1451       break;
1452     }
1453     case kMips64CeilWD: {
1454       FPURegister scratch = kScratchDoubleReg;
1455       __ ceil_w_d(scratch, i.InputDoubleRegister(0));
1456       __ mfc1(i.OutputRegister(), scratch);
1457       break;
1458     }
1459     case kMips64RoundWD: {
1460       FPURegister scratch = kScratchDoubleReg;
1461       __ round_w_d(scratch, i.InputDoubleRegister(0));
1462       __ mfc1(i.OutputRegister(), scratch);
1463       break;
1464     }
1465     case kMips64TruncWD: {
1466       FPURegister scratch = kScratchDoubleReg;
1467       // Other arches use round to zero here, so we follow.
1468       __ trunc_w_d(scratch, i.InputDoubleRegister(0));
1469       __ mfc1(i.OutputRegister(), scratch);
1470       break;
1471     }
1472     case kMips64FloorWS: {
1473       FPURegister scratch = kScratchDoubleReg;
1474       __ floor_w_s(scratch, i.InputDoubleRegister(0));
1475       __ mfc1(i.OutputRegister(), scratch);
1476       break;
1477     }
1478     case kMips64CeilWS: {
1479       FPURegister scratch = kScratchDoubleReg;
1480       __ ceil_w_s(scratch, i.InputDoubleRegister(0));
1481       __ mfc1(i.OutputRegister(), scratch);
1482       break;
1483     }
1484     case kMips64RoundWS: {
1485       FPURegister scratch = kScratchDoubleReg;
1486       __ round_w_s(scratch, i.InputDoubleRegister(0));
1487       __ mfc1(i.OutputRegister(), scratch);
1488       break;
1489     }
1490     case kMips64TruncWS: {
1491       FPURegister scratch = kScratchDoubleReg;
1492       bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
1493       __ trunc_w_s(scratch, i.InputDoubleRegister(0));
1494       __ mfc1(i.OutputRegister(), scratch);
1495       if (set_overflow_to_min_i32) {
1496         // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
1497         // because INT32_MIN allows easier out-of-bounds detection.
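             // Sketch of the check, assuming the FPU saturates an invalid
             // conversion to INT32_MAX: out + 1 then wraps to INT32_MIN, so
             // (out + 1) < out holds only for that saturated value, and Movn
             // substitutes the wrapped INT32_MIN exactly in that case.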
1498         __ addiu(kScratchReg, i.OutputRegister(), 1);
1499         __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
1500         __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
1501       }
1502       break;
1503     }
1504     case kMips64TruncLS: {
1505       FPURegister scratch = kScratchDoubleReg;
1506       Register result = kScratchReg;
1507 
1508       bool load_status = instr->OutputCount() > 1;
1509       // Other arches use round to zero here, so we follow.
1510       __ trunc_l_s(scratch, i.InputDoubleRegister(0));
1511       __ dmfc1(i.OutputRegister(), scratch);
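           // A second output, when present, receives a success flag: read
           // FCSR, keep only the overflow and invalid-operation cause bits,
           // collapse them to 0/1 with Slt, then invert with xori so that 1
           // means the conversion raised neither flag.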
1512       if (load_status) {
1513         __ cfc1(result, FCSR);
1514         // Check for overflow and NaNs.
1515         __ And(result, result,
1516                (kFCSROverflowCauseMask | kFCSRInvalidOpCauseMask));
1517         __ Slt(result, zero_reg, result);
1518         __ xori(result, result, 1);
1519         __ mov(i.OutputRegister(1), result);
1520       }
1521       break;
1522     }
1523     case kMips64TruncLD: {
1524       FPURegister scratch = kScratchDoubleReg;
1525       Register result = kScratchReg;
1526 
1527       bool set_overflow_to_min_i64 = MiscField::decode(instr->opcode());
1528       bool load_status = instr->OutputCount() > 1;
1529       DCHECK_IMPLIES(set_overflow_to_min_i64, instr->OutputCount() == 1);
1530       // Other arches use round to zero here, so we follow.
1531       __ trunc_l_d(scratch, i.InputDoubleRegister(0));
1532       __ dmfc1(i.OutputRegister(0), scratch);
1533       if (load_status) {
1534         __ cfc1(result, FCSR);
1535         // Check for overflow and NaNs.
1536         __ And(result, result,
1537                (kFCSROverflowCauseMask | kFCSRInvalidOpCauseMask));
1538         __ Slt(result, zero_reg, result);
1539         __ xori(result, result, 1);
1540         __ mov(i.OutputRegister(1), result);
1541       }
1542       if (set_overflow_to_min_i64) {
1543         // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
1544         // because INT64_MIN allows easier out-of-bounds detection.
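             // Same trick as the 32-bit case: assuming the invalid result is
             // INT64_MAX, out + 1 wraps to INT64_MIN, (out + 1) < out detects
             // the wrap, and Movn installs INT64_MIN.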
1545         __ Daddu(kScratchReg, i.OutputRegister(), 1);
1546         __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
1547         __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
1548       }
1549       break;
1550     }
1551     case kMips64TruncUwD: {
1552       FPURegister scratch = kScratchDoubleReg;
1553       __ Trunc_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
1554       break;
1555     }
1556     case kMips64TruncUwS: {
1557       FPURegister scratch = kScratchDoubleReg;
1558       bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
1559       __ Trunc_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
1560       if (set_overflow_to_min_i32) {
1561         // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
1562         // because 0 allows easier out-of-bounds detection.
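             // Trunc_uw_s is expected to saturate a failed conversion to
             // UINT32_MAX, which sits in the 64-bit register sign-extended as
             // -1; adding 1 yields 0, and Movz overwrites the output with 0
             // exactly when that sum is 0.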
1563         __ addiu(kScratchReg, i.OutputRegister(), 1);
1564         __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
1565       }
1566       break;
1567     }
1568     case kMips64TruncUlS: {
1569       FPURegister scratch = kScratchDoubleReg;
1570       Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
1571       __ Trunc_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch,
1572                     result);
1573       break;
1574     }
1575     case kMips64TruncUlD: {
1576       FPURegister scratch = kScratchDoubleReg;
1577       Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
1578       __ Trunc_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), scratch,
1579                     result);
1580       break;
1581     }
1582     case kMips64BitcastDL:
1583       __ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0));
1584       break;
1585     case kMips64BitcastLD:
1586       __ dmtc1(i.InputRegister(0), i.OutputDoubleRegister());
1587       break;
1588     case kMips64Float64ExtractLowWord32:
1589       __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
1590       break;
1591     case kMips64Float64ExtractHighWord32:
1592       __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
1593       break;
1594     case kMips64Float64InsertLowWord32:
1595       __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
1596       break;
1597     case kMips64Float64InsertHighWord32:
1598       __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
1599       break;
1600     // ... more basic instructions ...
1601 
1602     case kMips64Seb:
1603       __ seb(i.OutputRegister(), i.InputRegister(0));
1604       break;
1605     case kMips64Seh:
1606       __ seh(i.OutputRegister(), i.InputRegister(0));
1607       break;
1608     case kMips64Lbu:
1609       __ Lbu(i.OutputRegister(), i.MemoryOperand());
1610       break;
1611     case kMips64Lb:
1612       __ Lb(i.OutputRegister(), i.MemoryOperand());
1613       break;
1614     case kMips64Sb:
1615       __ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
1616       break;
1617     case kMips64Lhu:
1618       __ Lhu(i.OutputRegister(), i.MemoryOperand());
1619       break;
1620     case kMips64Ulhu:
1621       __ Ulhu(i.OutputRegister(), i.MemoryOperand());
1622       break;
1623     case kMips64Lh:
1624       __ Lh(i.OutputRegister(), i.MemoryOperand());
1625       break;
1626     case kMips64Ulh:
1627       __ Ulh(i.OutputRegister(), i.MemoryOperand());
1628       break;
1629     case kMips64Sh:
1630       __ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
1631       break;
1632     case kMips64Ush:
1633       __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
1634       break;
1635     case kMips64Lw:
1636       __ Lw(i.OutputRegister(), i.MemoryOperand());
1637       break;
1638     case kMips64Ulw:
1639       __ Ulw(i.OutputRegister(), i.MemoryOperand());
1640       break;
1641     case kMips64Lwu:
1642       __ Lwu(i.OutputRegister(), i.MemoryOperand());
1643       break;
1644     case kMips64Ulwu:
1645       __ Ulwu(i.OutputRegister(), i.MemoryOperand());
1646       break;
1647     case kMips64Ld:
1648       __ Ld(i.OutputRegister(), i.MemoryOperand());
1649       break;
1650     case kMips64Uld:
1651       __ Uld(i.OutputRegister(), i.MemoryOperand());
1652       break;
1653     case kMips64Sw:
1654       __ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
1655       break;
1656     case kMips64Usw:
1657       __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
1658       break;
1659     case kMips64Sd:
1660       __ Sd(i.InputOrZeroRegister(2), i.MemoryOperand());
1661       break;
1662     case kMips64Usd:
1663       __ Usd(i.InputOrZeroRegister(2), i.MemoryOperand());
1664       break;
1665     case kMips64Lwc1: {
1666       __ Lwc1(i.OutputSingleRegister(), i.MemoryOperand());
1667       break;
1668     }
1669     case kMips64Ulwc1: {
1670       __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
1671       break;
1672     }
1673     case kMips64Swc1: {
1674       size_t index = 0;
1675       MemOperand operand = i.MemoryOperand(&index);
1676       FPURegister ft = i.InputOrZeroSingleRegister(index);
1677       if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1678         __ Move(kDoubleRegZero, 0.0);
1679       }
1680       __ Swc1(ft, operand);
1681       break;
1682     }
1683     case kMips64Uswc1: {
1684       size_t index = 0;
1685       MemOperand operand = i.MemoryOperand(&index);
1686       FPURegister ft = i.InputOrZeroSingleRegister(index);
1687       if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1688         __ Move(kDoubleRegZero, 0.0);
1689       }
1690       __ Uswc1(ft, operand, kScratchReg);
1691       break;
1692     }
1693     case kMips64Ldc1:
1694       __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
1695       break;
1696     case kMips64Uldc1:
1697       __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
1698       break;
1699     case kMips64Sdc1: {
1700       FPURegister ft = i.InputOrZeroDoubleRegister(2);
1701       if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1702         __ Move(kDoubleRegZero, 0.0);
1703       }
1704       __ Sdc1(ft, i.MemoryOperand());
1705       break;
1706     }
1707     case kMips64Usdc1: {
1708       FPURegister ft = i.InputOrZeroDoubleRegister(2);
1709       if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1710         __ Move(kDoubleRegZero, 0.0);
1711       }
1712       __ Usdc1(ft, i.MemoryOperand(), kScratchReg);
1713       break;
1714     }
1715     case kMips64Sync: {
1716       __ sync();
1717       break;
1718     }
1719     case kMips64Push:
1720       if (instr->InputAt(0)->IsFPRegister()) {
1721         __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
1722         __ Subu(sp, sp, Operand(kDoubleSize));
1723         frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
1724       } else {
1725         __ Push(i.InputRegister(0));
1726         frame_access_state()->IncreaseSPDelta(1);
1727       }
1728       break;
1729     case kMips64Peek: {
1730       int reverse_slot = i.InputInt32(0);
1731       int offset =
1732           FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
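           // reverse_slot counts from the top of the frame, so subtracting it
           // from the total slot count gives an absolute slot index that
           // FrameSlotToFPOffset turns into an fp-relative byte offset.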
1733       if (instr->OutputAt(0)->IsFPRegister()) {
1734         LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
1735         if (op->representation() == MachineRepresentation::kFloat64) {
1736           __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
1737         } else if (op->representation() == MachineRepresentation::kFloat32) {
1738           __ Lwc1(
1739               i.OutputSingleRegister(0),
1740               MemOperand(fp, offset + kLessSignificantWordInDoublewordOffset));
1741         } else {
1742           DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
1743           __ ld_b(i.OutputSimd128Register(), MemOperand(fp, offset));
1744         }
1745       } else {
1746         __ Ld(i.OutputRegister(0), MemOperand(fp, offset));
1747       }
1748       break;
1749     }
1750     case kMips64StackClaim: {
1751       __ Dsubu(sp, sp, Operand(i.InputInt32(0)));
1752       frame_access_state()->IncreaseSPDelta(i.InputInt32(0) /
1753                                             kSystemPointerSize);
1754       break;
1755     }
1756     case kMips64StoreToStackSlot: {
1757       if (instr->InputAt(0)->IsFPRegister()) {
1758         if (instr->InputAt(0)->IsSimd128Register()) {
1759           CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1760           __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1)));
1761         } else {
1762           __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
1763         }
1764       } else {
1765         __ Sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
1766       }
1767       break;
1768     }
1769     case kMips64ByteSwap64: {
1770       __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8);
1771       break;
1772     }
1773     case kMips64ByteSwap32: {
1774       __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
1775       break;
1776     }
1777     case kMips64S128LoadSplat: {
1778       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1779       auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode()));
1780       __ LoadSplat(sz, i.OutputSimd128Register(), i.MemoryOperand());
1781       break;
1782     }
1783     case kMips64S128Load8x8S: {
1784       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1785       Simd128Register dst = i.OutputSimd128Register();
1786       Simd128Register scratch = kSimd128ScratchReg;
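           // Widening-load pattern: load 64 bits, splat them with fill_d,
           // build a per-byte sign mask with clti_s_b (all-ones for negative
           // bytes), then ilvr_b interleaves mask bytes above data bytes,
           // sign-extending eight i8 lanes into eight i16 lanes. The unsigned
           // variants below interleave with a zero vector instead.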
1787       __ Ld(kScratchReg, i.MemoryOperand());
1788       __ fill_d(dst, kScratchReg);
1789       __ clti_s_b(scratch, dst, 0);
1790       __ ilvr_b(dst, scratch, dst);
1791       break;
1792     }
1793     case kMips64S128Load8x8U: {
1794       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1795       Simd128Register dst = i.OutputSimd128Register();
1796       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
1797       __ Ld(kScratchReg, i.MemoryOperand());
1798       __ fill_d(dst, kScratchReg);
1799       __ ilvr_b(dst, kSimd128RegZero, dst);
1800       break;
1801     }
1802     case kMips64S128Load16x4S: {
1803       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1804       Simd128Register dst = i.OutputSimd128Register();
1805       Simd128Register scratch = kSimd128ScratchReg;
1806       __ Ld(kScratchReg, i.MemoryOperand());
1807       __ fill_d(dst, kScratchReg);
1808       __ clti_s_h(scratch, dst, 0);
1809       __ ilvr_h(dst, scratch, dst);
1810       break;
1811     }
1812     case kMips64S128Load16x4U: {
1813       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1814       Simd128Register dst = i.OutputSimd128Register();
1815       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
1816       __ Ld(kScratchReg, i.MemoryOperand());
1817       __ fill_d(dst, kScratchReg);
1818       __ ilvr_h(dst, kSimd128RegZero, dst);
1819       break;
1820     }
1821     case kMips64S128Load32x2S: {
1822       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1823       Simd128Register dst = i.OutputSimd128Register();
1824       Simd128Register scratch = kSimd128ScratchReg;
1825       __ Ld(kScratchReg, i.MemoryOperand());
1826       __ fill_d(dst, kScratchReg);
1827       __ clti_s_w(scratch, dst, 0);
1828       __ ilvr_w(dst, scratch, dst);
1829       break;
1830     }
1831     case kMips64S128Load32x2U: {
1832       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1833       Simd128Register dst = i.OutputSimd128Register();
1834       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
1835       __ Ld(kScratchReg, i.MemoryOperand());
1836       __ fill_d(dst, kScratchReg);
1837       __ ilvr_w(dst, kSimd128RegZero, dst);
1838       break;
1839     }
1840     case kMips64S128Load32Zero: {
1841       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1842       Simd128Register dst = i.OutputSimd128Register();
1843       __ xor_v(dst, dst, dst);
1844       __ Lwu(kScratchReg, i.MemoryOperand());
1845       __ insert_w(dst, 0, kScratchReg);
1846       break;
1847     }
1848     case kMips64S128Load64Zero: {
1849       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1850       Simd128Register dst = i.OutputSimd128Register();
1851       __ xor_v(dst, dst, dst);
1852       __ Ld(kScratchReg, i.MemoryOperand());
1853       __ insert_d(dst, 0, kScratchReg);
1854       break;
1855     }
1856     case kMips64S128LoadLane: {
1857       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1858       Simd128Register dst = i.OutputSimd128Register();
1859       DCHECK_EQ(dst, i.InputSimd128Register(0));
1860       auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode()));
1861       __ LoadLane(sz, dst, i.InputUint8(1), i.MemoryOperand(2));
1862       break;
1863     }
1864     case kMips64S128StoreLane: {
1865       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1866       Simd128Register src = i.InputSimd128Register(0);
1867       auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode()));
1868       __ StoreLane(sz, src, i.InputUint8(1), i.MemoryOperand(2));
1869       break;
1870     }
1871     case kAtomicLoadInt8:
1872       DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
1873       ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
1874       break;
1875     case kAtomicLoadUint8:
1876       ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
1877       break;
1878     case kAtomicLoadInt16:
1879       DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
1880       ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
1881       break;
1882     case kAtomicLoadUint16:
1883       ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
1884       break;
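         // At 64-bit width a word32 atomic load must zero-extend (Lwu); at
         // 32-bit width the usual sign-extending Lw is used.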
1885     case kAtomicLoadWord32:
1886       if (AtomicWidthField::decode(opcode) == AtomicWidth::kWord32)
1887         ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
1888       else
1889         ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
1890       break;
1891     case kMips64Word64AtomicLoadUint64:
1892       ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
1893       break;
1894     case kAtomicStoreWord8:
1895       ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
1896       break;
1897     case kAtomicStoreWord16:
1898       ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
1899       break;
1900     case kAtomicStoreWord32:
1901       ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
1902       break;
1903     case kMips64StoreCompressTagged:
1904     case kMips64Word64AtomicStoreWord64:
1905       ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
1906       break;
1907     case kAtomicExchangeInt8:
1908       DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
1909       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
1910       break;
1911     case kAtomicExchangeUint8:
1912       switch (AtomicWidthField::decode(opcode)) {
1913         case AtomicWidth::kWord32:
1914           ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
1915           break;
1916         case AtomicWidth::kWord64:
1917           ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
1918           break;
1919       }
1920       break;
1921     case kAtomicExchangeInt16:
1922       DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
1923       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
1924       break;
1925     case kAtomicExchangeUint16:
1926       switch (AtomicWidthField::decode(opcode)) {
1927         case AtomicWidth::kWord32:
1928           ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
1929           break;
1930         case AtomicWidth::kWord64:
1931           ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
1932           break;
1933       }
1934       break;
1935     case kAtomicExchangeWord32:
1936       switch (AtomicWidthField::decode(opcode)) {
1937         case AtomicWidth::kWord32:
1938           ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
1939           break;
1940         case AtomicWidth::kWord64:
1941           ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
1942           break;
1943       }
1944       break;
1945     case kMips64Word64AtomicExchangeUint64:
1946       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
1947       break;
1948     case kAtomicCompareExchangeInt8:
1949       DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
1950       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
1951       break;
1952     case kAtomicCompareExchangeUint8:
1953       switch (AtomicWidthField::decode(opcode)) {
1954         case AtomicWidth::kWord32:
1955           ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
1956           break;
1957         case AtomicWidth::kWord64:
1958           ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
1959           break;
1960       }
1961       break;
1962     case kAtomicCompareExchangeInt16:
1963       DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
1964       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
1965       break;
1966     case kAtomicCompareExchangeUint16:
1967       switch (AtomicWidthField::decode(opcode)) {
1968         case AtomicWidth::kWord32:
1969           ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
1970           break;
1971         case AtomicWidth::kWord64:
1972           ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
1973           break;
1974       }
1975       break;
1976     case kAtomicCompareExchangeWord32:
1977       switch (AtomicWidthField::decode(opcode)) {
1978         case AtomicWidth::kWord32:
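               // sll with shift amount 0 sign-extends the 32-bit expected
               // value so it compares equal to the word loaded by Ll, which
               // is also sign-extended.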
1979           __ sll(i.InputRegister(2), i.InputRegister(2), 0);
1980           ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
1981           break;
1982         case AtomicWidth::kWord64:
1983           ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
1984           break;
1985       }
1986       break;
1987     case kMips64Word64AtomicCompareExchangeUint64:
1988       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
1989       break;
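     // The macro below expands one case per operand width. In the
     // ASSEMBLE_ATOMIC_BINOP_EXT uses, the boolean argument appears to select
     // sign extension of the extracted subword (true only for the Int8/Int16
     // variants), followed by the subword size in bits and the width of the
     // enclosing LL/SC word.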
1990 #define ATOMIC_BINOP_CASE(op, inst32, inst64)                          \
1991   case kAtomic##op##Int8:                                              \
1992     DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
1993     ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst32, 32);            \
1994     break;                                                             \
1995   case kAtomic##op##Uint8:                                             \
1996     switch (AtomicWidthField::decode(opcode)) {                        \
1997       case AtomicWidth::kWord32:                                       \
1998         ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst32, 32);       \
1999         break;                                                         \
2000       case AtomicWidth::kWord64:                                       \
2001         ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst64, 64);     \
2002         break;                                                         \
2003     }                                                                  \
2004     break;                                                             \
2005   case kAtomic##op##Int16:                                             \
2006     DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
2007     ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst32, 32);           \
2008     break;                                                             \
2009   case kAtomic##op##Uint16:                                            \
2010     switch (AtomicWidthField::decode(opcode)) {                        \
2011       case AtomicWidth::kWord32:                                       \
2012         ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst32, 32);      \
2013         break;                                                         \
2014       case AtomicWidth::kWord64:                                       \
2015         ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst64, 64);    \
2016         break;                                                         \
2017     }                                                                  \
2018     break;                                                             \
2019   case kAtomic##op##Word32:                                            \
2020     switch (AtomicWidthField::decode(opcode)) {                        \
2021       case AtomicWidth::kWord32:                                       \
2022         ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32);                         \
2023         break;                                                         \
2024       case AtomicWidth::kWord64:                                       \
2025         ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst64, 64);    \
2026         break;                                                         \
2027     }                                                                  \
2028     break;                                                             \
2029   case kMips64Word64Atomic##op##Uint64:                                \
2030     ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64);                           \
2031     break;
2032       ATOMIC_BINOP_CASE(Add, Addu, Daddu)
2033       ATOMIC_BINOP_CASE(Sub, Subu, Dsubu)
2034       ATOMIC_BINOP_CASE(And, And, And)
2035       ATOMIC_BINOP_CASE(Or, Or, Or)
2036       ATOMIC_BINOP_CASE(Xor, Xor, Xor)
2037 #undef ATOMIC_BINOP_CASE
2038     case kMips64AssertEqual:
2039       __ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
2040                 i.InputRegister(0), Operand(i.InputRegister(1)));
2041       break;
2042     case kMips64S128Const: {
2043       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2044       Simd128Register dst = i.OutputSimd128Register();
2045       uint64_t imm1 = make_uint64(i.InputUint32(1), i.InputUint32(0));
2046       uint64_t imm2 = make_uint64(i.InputUint32(3), i.InputUint32(2));
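           // Assemble the 128-bit immediate from the four 32-bit inputs:
           // inputs 0/1 form the low 64-bit lane and inputs 2/3 the high
           // lane, each materialized in kScratchReg and placed with insert_d.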
2047       __ li(kScratchReg, imm1);
2048       __ insert_d(dst, 0, kScratchReg);
2049       __ li(kScratchReg, imm2);
2050       __ insert_d(dst, 1, kScratchReg);
2051       break;
2052     }
2053     case kMips64S128Zero: {
2054       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2055       Simd128Register dst = i.OutputSimd128Register();
2056       __ xor_v(dst, dst, dst);
2057       break;
2058     }
2059     case kMips64S128AllOnes: {
2060       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2061       Simd128Register dst = i.OutputSimd128Register();
2062       __ ceq_d(dst, dst, dst);
2063       break;
2064     }
2065     case kMips64I32x4Splat: {
2066       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2067       __ fill_w(i.OutputSimd128Register(), i.InputRegister(0));
2068       break;
2069     }
2070     case kMips64I32x4ExtractLane: {
2071       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2072       __ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0),
2073                   i.InputInt8(1));
2074       break;
2075     }
2076     case kMips64I32x4ReplaceLane: {
2077       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2078       Simd128Register src = i.InputSimd128Register(0);
2079       Simd128Register dst = i.OutputSimd128Register();
2080       if (src != dst) {
2081         __ move_v(dst, src);
2082       }
2083       __ insert_w(dst, i.InputInt8(1), i.InputRegister(2));
2084       break;
2085     }
2086     case kMips64I32x4Add: {
2087       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2088       __ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2089                 i.InputSimd128Register(1));
2090       break;
2091     }
2092     case kMips64I32x4Sub: {
2093       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2094       __ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2095                 i.InputSimd128Register(1));
2096       break;
2097     }
2098     case kMips64F64x2Abs: {
2099       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
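           // bclri_d clears bit 63, the IEEE-754 sign bit, in each lane; the
           // Neg case below flips the same bit with bnegi_d.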
2100       __ bclri_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63);
2101       break;
2102     }
2103     case kMips64F64x2Neg: {
2104       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2105       __ bnegi_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63);
2106       break;
2107     }
2108     case kMips64F64x2Sqrt: {
2109       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2110       __ fsqrt_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
2111       break;
2112     }
2113     case kMips64F64x2Add: {
2114       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2115       ASSEMBLE_F64X2_ARITHMETIC_BINOP(fadd_d);
2116       break;
2117     }
2118     case kMips64F64x2Sub: {
2119       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2120       ASSEMBLE_F64X2_ARITHMETIC_BINOP(fsub_d);
2121       break;
2122     }
2123     case kMips64F64x2Mul: {
2124       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2125       ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmul_d);
2126       break;
2127     }
2128     case kMips64F64x2Div: {
2129       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2130       ASSEMBLE_F64X2_ARITHMETIC_BINOP(fdiv_d);
2131       break;
2132     }
2133     case kMips64F64x2Min: {
2134       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2135       Simd128Register dst = i.OutputSimd128Register();
2136       Simd128Register src0 = i.InputSimd128Register(0);
2137       Simd128Register src1 = i.InputSimd128Register(1);
2138       Simd128Register scratch0 = kSimd128RegZero;
2139       Simd128Register scratch1 = kSimd128ScratchReg;
2140 
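           // Wasm min must propagate NaN and order -0.0 below +0.0, which a
           // bare fmin_d does not guarantee, hence the manual selection below;
           // the trailing fmin_d only canonicalizes the result. Max below is
           // the mirrored sequence.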
2141       // If inputs are -0.0 and +0.0, then write -0.0 to scratch1.
2142       // scratch1 = (src0 == src1) ? (src0 | src1) : (src1 | src1).
2143       __ fseq_d(scratch0, src0, src1);
2144       __ bsel_v(scratch0, src1, src0);
2145       __ or_v(scratch1, scratch0, src1);
2146       // scratch0 = isNaN(src0) ? src0 : scratch1.
2147       __ fseq_d(scratch0, src0, src0);
2148       __ bsel_v(scratch0, src0, scratch1);
2149       // scratch1 = (src0 < scratch0) ? src0 : scratch0.
2150       __ fslt_d(scratch1, src0, scratch0);
2151       __ bsel_v(scratch1, scratch0, src0);
2152       // Canonicalize the result.
2153       __ fmin_d(dst, scratch1, scratch1);
2154       break;
2155     }
2156     case kMips64F64x2Max: {
2157       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2158       Simd128Register dst = i.OutputSimd128Register();
2159       Simd128Register src0 = i.InputSimd128Register(0);
2160       Simd128Register src1 = i.InputSimd128Register(1);
2161       Simd128Register scratch0 = kSimd128RegZero;
2162       Simd128Register scratch1 = kSimd128ScratchReg;
2163 
2164       // If inputs are -0.0 and +0.0, then write +0.0 to scratch1.
2165       // scratch1 = (src0 == src1) ? (src0 & src1) : (src1 & src1).
2166       __ fseq_d(scratch0, src0, src1);
2167       __ bsel_v(scratch0, src1, src0);
2168       __ and_v(scratch1, scratch0, src1);
2169       // scratch0 = isNaN(src0) ? src0 : scratch1.
2170       __ fseq_d(scratch0, src0, src0);
2171       __ bsel_v(scratch0, src0, scratch1);
2172       // scratch1 = (scratch0 < src0) ? src0 : scratch0.
2173       __ fslt_d(scratch1, scratch0, src0);
2174       __ bsel_v(scratch1, scratch0, src0);
2175       // Canonicalize the result.
2176       __ fmax_d(dst, scratch1, scratch1);
2177       break;
2178     }
2179     case kMips64F64x2Eq: {
2180       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2181       __ fceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2182                 i.InputSimd128Register(1));
2183       break;
2184     }
2185     case kMips64F64x2Ne: {
2186       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2187       __ fcune_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2188                  i.InputSimd128Register(1));
2189       break;
2190     }
2191     case kMips64F64x2Lt: {
2192       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2193       __ fclt_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2194                 i.InputSimd128Register(1));
2195       break;
2196     }
2197     case kMips64F64x2Le: {
2198       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2199       __ fcle_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2200                 i.InputSimd128Register(1));
2201       break;
2202     }
2203     case kMips64F64x2Splat: {
2204       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2205       __ Move(kScratchReg, i.InputDoubleRegister(0));
2206       __ fill_d(i.OutputSimd128Register(), kScratchReg);
2207       break;
2208     }
2209     case kMips64F64x2ExtractLane: {
2210       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2211       __ copy_s_d(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
2212       __ Move(i.OutputDoubleRegister(), kScratchReg);
2213       break;
2214     }
2215     case kMips64F64x2ReplaceLane: {
2216       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2217       Simd128Register src = i.InputSimd128Register(0);
2218       Simd128Register dst = i.OutputSimd128Register();
2219       __ Move(kScratchReg, i.InputDoubleRegister(2));
2220       if (dst != src) {
2221         __ move_v(dst, src);
2222       }
2223       __ insert_d(dst, i.InputInt8(1), kScratchReg);
2224       break;
2225     }
2226     case kMips64I64x2Splat: {
2227       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2228       __ fill_d(i.OutputSimd128Register(), i.InputRegister(0));
2229       break;
2230     }
2231     case kMips64I64x2ExtractLane: {
2232       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2233       __ copy_s_d(i.OutputRegister(), i.InputSimd128Register(0),
2234                   i.InputInt8(1));
2235       break;
2236     }
2237     case kMips64F64x2Pmin: {
2238       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2239       Simd128Register dst = i.OutputSimd128Register();
2240       Simd128Register lhs = i.InputSimd128Register(0);
2241       Simd128Register rhs = i.InputSimd128Register(1);
2242       // dst = rhs < lhs ? rhs : lhs
2243       __ fclt_d(dst, rhs, lhs);
2244       __ bsel_v(dst, lhs, rhs);
2245       break;
2246     }
2247     case kMips64F64x2Pmax: {
2248       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2249       Simd128Register dst = i.OutputSimd128Register();
2250       Simd128Register lhs = i.InputSimd128Register(0);
2251       Simd128Register rhs = i.InputSimd128Register(1);
2252       // dst = lhs < rhs ? rhs : lhs
2253       __ fclt_d(dst, lhs, rhs);
2254       __ bsel_v(dst, lhs, rhs);
2255       break;
2256     }
2257     case kMips64F64x2Ceil: {
2258       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2259       __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
2260                    kRoundToPlusInf);
2261       break;
2262     }
2263     case kMips64F64x2Floor: {
2264       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2265       __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
2266                    kRoundToMinusInf);
2267       break;
2268     }
2269     case kMips64F64x2Trunc: {
2270       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2271       __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
2272                    kRoundToZero);
2273       break;
2274     }
2275     case kMips64F64x2NearestInt: {
2276       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2277       __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
2278                    kRoundToNearest);
2279       break;
2280     }
2281     case kMips64F64x2ConvertLowI32x4S: {
2282       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
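           // Zip the low two i32 lanes with zeros into i64 lanes, then the
           // slli_d/srai_d pair shifts by 32 arithmetically to sign-extend
           // each lane before the signed int64 -> float64 conversion.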
2283       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2284       __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
2285       __ slli_d(kSimd128RegZero, kSimd128RegZero, 32);
2286       __ srai_d(kSimd128RegZero, kSimd128RegZero, 32);
2287       __ ffint_s_d(i.OutputSimd128Register(), kSimd128RegZero);
2288       break;
2289     }
2290     case kMips64F64x2ConvertLowI32x4U: {
2291       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2292       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2293       __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
2294       __ ffint_u_d(i.OutputSimd128Register(), kSimd128RegZero);
2295       break;
2296     }
2297     case kMips64F64x2PromoteLowF32x4: {
2298       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2299       __ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
2300       break;
2301     }
2302     case kMips64I64x2ReplaceLane: {
2303       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2304       Simd128Register src = i.InputSimd128Register(0);
2305       Simd128Register dst = i.OutputSimd128Register();
2306       if (src != dst) {
2307         __ move_v(dst, src);
2308       }
2309       __ insert_d(dst, i.InputInt8(1), i.InputRegister(2));
2310       break;
2311     }
2312     case kMips64I64x2Add: {
2313       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2314       __ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2315                 i.InputSimd128Register(1));
2316       break;
2317     }
2318     case kMips64I64x2Sub: {
2319       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2320       __ subv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2321                 i.InputSimd128Register(1));
2322       break;
2323     }
2324     case kMips64I64x2Mul: {
2325       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2326       __ mulv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2327                 i.InputSimd128Register(1));
2328       break;
2329     }
2330     case kMips64I64x2Neg: {
2331       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2332       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2333       __ subv_d(i.OutputSimd128Register(), kSimd128RegZero,
2334                 i.InputSimd128Register(0));
2335       break;
2336     }
2337     case kMips64I64x2Shl: {
2338       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2339       if (instr->InputAt(1)->IsRegister()) {
2340         __ fill_d(kSimd128ScratchReg, i.InputRegister(1));
2341         __ sll_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2342                  kSimd128ScratchReg);
2343       } else {
2344         __ slli_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2345                   i.InputInt6(1));
2346       }
2347       break;
2348     }
2349     case kMips64I64x2ShrS: {
2350       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2351       if (instr->InputAt(1)->IsRegister()) {
2352         __ fill_d(kSimd128ScratchReg, i.InputRegister(1));
2353         __ sra_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2354                  kSimd128ScratchReg);
2355       } else {
2356         __ srai_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2357                   i.InputInt6(1));
2358       }
2359       break;
2360     }
2361     case kMips64I64x2ShrU: {
2362       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2363       if (instr->InputAt(1)->IsRegister()) {
2364         __ fill_d(kSimd128ScratchReg, i.InputRegister(1));
2365         __ srl_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2366                  kSimd128ScratchReg);
2367       } else {
2368         __ srli_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2369                   i.InputInt6(1));
2370       }
2371       break;
2372     }
2373     case kMips64I64x2BitMask: {
2374       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2375       Register dst = i.OutputRegister();
2376       Simd128Register src = i.InputSimd128Register(0);
2377       Simd128Register scratch0 = kSimd128RegZero;
2378       Simd128Register scratch1 = kSimd128ScratchReg;
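           // Collect the two lane sign bits into bits 0 and 1 of the result:
           // srli_d leaves each lane's sign bit at bit 0 of that lane, shf_w
           // with immediate 0x02 copies the high lane's low word into word 0,
           // slli_d moves that copy to bit 1, or_v merges, and copy_u_b reads
           // byte 0. E.g. lanes {0x8000000000000000, 0x1} give dst = 0b01.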
2379       __ srli_d(scratch0, src, 63);
2380       __ shf_w(scratch1, scratch0, 0x02);
2381       __ slli_d(scratch1, scratch1, 1);
2382       __ or_v(scratch0, scratch0, scratch1);
2383       __ copy_u_b(dst, scratch0, 0);
2384       break;
2385     }
2386     case kMips64I64x2Eq: {
2387       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2388       __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2389                i.InputSimd128Register(1));
2390       break;
2391     }
2392     case kMips64I64x2Ne: {
2393       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2394       __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2395                i.InputSimd128Register(1));
2396       __ nor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
2397                i.OutputSimd128Register());
2398       break;
2399     }
2400     case kMips64I64x2GtS: {
2401       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2402       __ clt_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
2403                  i.InputSimd128Register(0));
2404       break;
2405     }
2406     case kMips64I64x2GeS: {
2407       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2408       __ cle_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
2409                  i.InputSimd128Register(0));
2410       break;
2411     }
2412     case kMips64I64x2Abs: {
2413       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2414       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2415       __ add_a_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
2416                  kSimd128RegZero);
2417       break;
2418     }
2419     case kMips64I64x2SConvertI32x4Low: {
2420       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2421       Simd128Register dst = i.OutputSimd128Register();
2422       Simd128Register src = i.InputSimd128Register(0);
2423       __ ilvr_w(kSimd128ScratchReg, src, src);
2424       __ slli_d(dst, kSimd128ScratchReg, 32);
2425       __ srai_d(dst, dst, 32);
2426       break;
2427     }
2428     case kMips64I64x2SConvertI32x4High: {
2429       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2430       Simd128Register dst = i.OutputSimd128Register();
2431       Simd128Register src = i.InputSimd128Register(0);
2432       __ ilvl_w(kSimd128ScratchReg, src, src);
2433       __ slli_d(dst, kSimd128ScratchReg, 32);
2434       __ srai_d(dst, dst, 32);
2435       break;
2436     }
2437     case kMips64I64x2UConvertI32x4Low: {
2438       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2439       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2440       __ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero,
2441                 i.InputSimd128Register(0));
2442       break;
2443     }
2444     case kMips64I64x2UConvertI32x4High: {
2445       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2446       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2447       __ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero,
2448                 i.InputSimd128Register(0));
2449       break;
2450     }
2451     case kMips64ExtMulLow: {
2452       auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode()));
2453       __ ExtMulLow(dt, i.OutputSimd128Register(), i.InputSimd128Register(0),
2454                    i.InputSimd128Register(1));
2455       break;
2456     }
2457     case kMips64ExtMulHigh: {
2458       auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode()));
2459       __ ExtMulHigh(dt, i.OutputSimd128Register(), i.InputSimd128Register(0),
2460                     i.InputSimd128Register(1));
2461       break;
2462     }
2463     case kMips64ExtAddPairwise: {
2464       auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode()));
2465       __ ExtAddPairwise(dt, i.OutputSimd128Register(),
2466                         i.InputSimd128Register(0));
2467       break;
2468     }
2469     case kMips64F32x4Splat: {
2470       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2471       __ FmoveLow(kScratchReg, i.InputSingleRegister(0));
2472       __ fill_w(i.OutputSimd128Register(), kScratchReg);
2473       break;
2474     }
2475     case kMips64F32x4ExtractLane: {
2476       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2477       __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
2478       __ FmoveLow(i.OutputSingleRegister(), kScratchReg);
2479       break;
2480     }
2481     case kMips64F32x4ReplaceLane: {
2482       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2483       Simd128Register src = i.InputSimd128Register(0);
2484       Simd128Register dst = i.OutputSimd128Register();
2485       __ FmoveLow(kScratchReg, i.InputSingleRegister(2));
2486       if (dst != src) {
2487         __ move_v(dst, src);
2488       }
2489       __ insert_w(dst, i.InputInt8(1), kScratchReg);
2490       break;
2491     }
2492     case kMips64F32x4SConvertI32x4: {
2493       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2494       __ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2495       break;
2496     }
2497     case kMips64F32x4UConvertI32x4: {
2498       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2499       __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2500       break;
2501     }
2502     case kMips64I32x4Mul: {
2503       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2504       __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2505                 i.InputSimd128Register(1));
2506       break;
2507     }
2508     case kMips64I32x4MaxS: {
2509       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2510       __ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2511                  i.InputSimd128Register(1));
2512       break;
2513     }
2514     case kMips64I32x4MinS: {
2515       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2516       __ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2517                  i.InputSimd128Register(1));
2518       break;
2519     }
2520     case kMips64I32x4Eq: {
2521       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2522       __ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2523                i.InputSimd128Register(1));
2524       break;
2525     }
2526     case kMips64I32x4Ne: {
2527       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2528       Simd128Register dst = i.OutputSimd128Register();
2529       __ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
2530       __ nor_v(dst, dst, dst);
2531       break;
2532     }
2533     case kMips64I32x4Shl: {
2534       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2535       if (instr->InputAt(1)->IsRegister()) {
2536         __ fill_w(kSimd128ScratchReg, i.InputRegister(1));
2537         __ sll_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2538                  kSimd128ScratchReg);
2539       } else {
2540         __ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2541                   i.InputInt5(1));
2542       }
2543       break;
2544     }
2545     case kMips64I32x4ShrS: {
2546       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2547       if (instr->InputAt(1)->IsRegister()) {
2548         __ fill_w(kSimd128ScratchReg, i.InputRegister(1));
2549         __ sra_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2550                  kSimd128ScratchReg);
2551       } else {
2552         __ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2553                   i.InputInt5(1));
2554       }
2555       break;
2556     }
2557     case kMips64I32x4ShrU: {
2558       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2559       if (instr->InputAt(1)->IsRegister()) {
2560         __ fill_w(kSimd128ScratchReg, i.InputRegister(1));
2561         __ srl_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2562                  kSimd128ScratchReg);
2563       } else {
2564         __ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2565                   i.InputInt5(1));
2566       }
2567       break;
2568     }
2569     case kMips64I32x4MaxU: {
2570       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2571       __ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2572                  i.InputSimd128Register(1));
2573       break;
2574     }
2575     case kMips64I32x4MinU: {
2576       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2577       __ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2578                  i.InputSimd128Register(1));
2579       break;
2580     }
2581     case kMips64S128Select: {
2582       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2583       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
2584       __ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
2585                 i.InputSimd128Register(1));
2586       break;
2587     }
2588     case kMips64S128AndNot: {
2589       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2590       Simd128Register scratch = kSimd128ScratchReg,
2591                       dst = i.OutputSimd128Register(),
2592                       src0 = i.InputSimd128Register(0),
2593                       src1 = i.InputSimd128Register(1);
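      // MSA has no single and-not instruction, so it is synthesized here:
      // nor_v of a register with itself is bitwise NOT, and the following
      // and_v gives, in effect, dst = src0 & ~src1.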
2594       __ nor_v(scratch, src1, src1);
2595       __ and_v(dst, scratch, src0);
2596       break;
2597     }
2598     case kMips64F32x4Abs: {
2599       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2600       __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
2601       break;
2602     }
2603     case kMips64F32x4Neg: {
2604       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2605       __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
2606       break;
2607     }
2608     case kMips64F32x4RecipApprox: {
2609       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2610       __ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2611       break;
2612     }
2613     case kMips64F32x4RecipSqrtApprox: {
2614       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2615       __ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2616       break;
2617     }
2618     case kMips64F32x4Add: {
2619       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2620       __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2621                 i.InputSimd128Register(1));
2622       break;
2623     }
2624     case kMips64F32x4Sub: {
2625       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2626       __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2627                 i.InputSimd128Register(1));
2628       break;
2629     }
2630     case kMips64F32x4Mul: {
2631       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2632       __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2633                 i.InputSimd128Register(1));
2634       break;
2635     }
2636     case kMips64F32x4Div: {
2637       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2638       __ fdiv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2639                 i.InputSimd128Register(1));
2640       break;
2641     }
2642     case kMips64F32x4Max: {
2643       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2644       Simd128Register dst = i.OutputSimd128Register();
2645       Simd128Register src0 = i.InputSimd128Register(0);
2646       Simd128Register src1 = i.InputSimd128Register(1);
2647       Simd128Register scratch0 = kSimd128RegZero;
2648       Simd128Register scratch1 = kSimd128ScratchReg;
2649 
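      // Wasm f32x4.max must return NaN if either input is NaN, and +0.0
      // for max(-0.0, +0.0); a bare fmax_w does not guarantee either, so
      // the sequence below patches up the signed-zero and NaN cases before
      // the final canonicalizing fmax_w.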
2650       // If the inputs are -0.0 and +0.0, write +0.0 to scratch1.
2651       // scratch1 = (src0 == src1) ? (src0 & src1) : (src1 & src1).
2652       __ fseq_w(scratch0, src0, src1);
2653       __ bsel_v(scratch0, src1, src0);
2654       __ and_v(scratch1, scratch0, src1);
2655       // scratch0 = isNaN(src0) ? src0 : scratch1.
2656       __ fseq_w(scratch0, src0, src0);
2657       __ bsel_v(scratch0, src0, scratch1);
2658       // scratch1 = (scratch0 < src0) ? src0 : scratch0.
2659       __ fslt_w(scratch1, scratch0, src0);
2660       __ bsel_v(scratch1, scratch0, src0);
2661       // Canonicalize the result.
2662       __ fmax_w(dst, scratch1, scratch1);
2663       break;
2664     }
2665     case kMips64F32x4Min: {
2666       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2667       Simd128Register dst = i.OutputSimd128Register();
2668       Simd128Register src0 = i.InputSimd128Register(0);
2669       Simd128Register src1 = i.InputSimd128Register(1);
2670       Simd128Register scratch0 = kSimd128RegZero;
2671       Simd128Register scratch1 = kSimd128ScratchReg;
2672 
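      // Wasm f32x4.min must return NaN if either input is NaN, and -0.0
      // for min(-0.0, +0.0); OR-ing the two zeros below sets the sign bit,
      // which yields the required -0.0.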
2673       // If the inputs are -0.0 and +0.0, write -0.0 to scratch1.
2674       // scratch1 = (src0 == src1) ? (src0 | src1) : (src1 | src1).
2675       __ fseq_w(scratch0, src0, src1);
2676       __ bsel_v(scratch0, src1, src0);
2677       __ or_v(scratch1, scratch0, src1);
2678       // scratch0 = isNaN(src0) ? src0 : scratch1.
2679       __ fseq_w(scratch0, src0, src0);
2680       __ bsel_v(scratch0, src0, scratch1);
2681       // scratch1 = (src0 < scratch0) ? src0 : scratch0.
2682       __ fslt_w(scratch1, src0, scratch0);
2683       __ bsel_v(scratch1, scratch0, src0);
2684       // Canonicalize the result.
2685       __ fmin_w(dst, scratch1, scratch1);
2686       break;
2687     }
2688     case kMips64F32x4Eq: {
2689       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2690       __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2691                 i.InputSimd128Register(1));
2692       break;
2693     }
2694     case kMips64F32x4Ne: {
2695       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2696       __ fcune_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2697                  i.InputSimd128Register(1));
2698       break;
2699     }
2700     case kMips64F32x4Lt: {
2701       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2702       __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2703                 i.InputSimd128Register(1));
2704       break;
2705     }
2706     case kMips64F32x4Le: {
2707       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2708       __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2709                 i.InputSimd128Register(1));
2710       break;
2711     }
2712     case kMips64F32x4Pmin: {
2713       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2714       Simd128Register dst = i.OutputSimd128Register();
2715       Simd128Register lhs = i.InputSimd128Register(0);
2716       Simd128Register rhs = i.InputSimd128Register(1);
2717       // dst = rhs < lhs ? rhs : lhs
2718       __ fclt_w(dst, rhs, lhs);
2719       __ bsel_v(dst, lhs, rhs);
2720       break;
2721     }
2722     case kMips64F32x4Pmax: {
2723       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2724       Simd128Register dst = i.OutputSimd128Register();
2725       Simd128Register lhs = i.InputSimd128Register(0);
2726       Simd128Register rhs = i.InputSimd128Register(1);
2727       // dst = lhs < rhs ? rhs : lhs
2728       __ fclt_w(dst, lhs, rhs);
2729       __ bsel_v(dst, lhs, rhs);
2730       break;
2731     }
2732     case kMips64F32x4Ceil: {
2733       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2734       __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
2735                    kRoundToPlusInf);
2736       break;
2737     }
2738     case kMips64F32x4Floor: {
2739       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2740       __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
2741                    kRoundToMinusInf);
2742       break;
2743     }
2744     case kMips64F32x4Trunc: {
2745       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2746       __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
2747                    kRoundToZero);
2748       break;
2749     }
2750     case kMips64F32x4NearestInt: {
2751       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2752       __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
2753                    kRoundToNearest);
2754       break;
2755     }
2756     case kMips64F32x4DemoteF64x2Zero: {
2757       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2758       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2759       __ fexdo_w(i.OutputSimd128Register(), kSimd128RegZero,
2760                  i.InputSimd128Register(0));
2761       break;
2762     }
2763     case kMips64I32x4SConvertF32x4: {
2764       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2765       __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2766       break;
2767     }
2768     case kMips64I32x4UConvertF32x4: {
2769       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2770       __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2771       break;
2772     }
2773     case kMips64F32x4Sqrt: {
2774       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2775       __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2776       break;
2777     }
2778     case kMips64I32x4Neg: {
2779       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2780       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2781       __ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
2782                 i.InputSimd128Register(0));
2783       break;
2784     }
2785     case kMips64I32x4GtS: {
2786       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2787       __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2788                  i.InputSimd128Register(0));
2789       break;
2790     }
2791     case kMips64I32x4GeS: {
2792       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2793       __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2794                  i.InputSimd128Register(0));
2795       break;
2796     }
2797     case kMips64I32x4GtU: {
2798       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2799       __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2800                  i.InputSimd128Register(0));
2801       break;
2802     }
2803     case kMips64I32x4GeU: {
2804       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2805       __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2806                  i.InputSimd128Register(0));
2807       break;
2808     }
2809     case kMips64I32x4Abs: {
2810       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2811       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2812       __ asub_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2813                   kSimd128RegZero);
2814       break;
2815     }
2816     case kMips64I32x4BitMask: {
2817       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2818       Register dst = i.OutputRegister();
2819       Simd128Register src = i.InputSimd128Register(0);
2820       Simd128Register scratch0 = kSimd128RegZero;
2821       Simd128Register scratch1 = kSimd128ScratchReg;
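      // Gather the sign bit of each of the four lanes into the low nibble
      // of byte 0: srli_w drops each lane's MSB to bit 0, the srli_d/or_v
      // pair folds odd lanes onto even ones, and shf_w/slli_d fold the
      // upper doubleword down. In effect:
      //   dst = (s3 << 3) | (s2 << 2) | (s1 << 1) | s0,
      // where sk is the sign bit of lane k.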
2822       __ srli_w(scratch0, src, 31);
2823       __ srli_d(scratch1, scratch0, 31);
2824       __ or_v(scratch0, scratch0, scratch1);
2825       __ shf_w(scratch1, scratch0, 0x0E);
2826       __ slli_d(scratch1, scratch1, 2);
2827       __ or_v(scratch0, scratch0, scratch1);
2828       __ copy_u_b(dst, scratch0, 0);
2829       break;
2830     }
2831     case kMips64I32x4DotI16x8S: {
2832       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2833       __ dotp_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2834                   i.InputSimd128Register(1));
2835       break;
2836     }
2837     case kMips64I32x4TruncSatF64x2SZero: {
2838       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2839       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2840       __ ftrunc_s_d(kSimd128ScratchReg, i.InputSimd128Register(0));
2841       __ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
2842       __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
2843                  kSimd128ScratchReg);
2844       break;
2845     }
2846     case kMips64I32x4TruncSatF64x2UZero: {
2847       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2848       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2849       __ ftrunc_u_d(kSimd128ScratchReg, i.InputSimd128Register(0));
2850       __ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
2851       __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
2852                  kSimd128ScratchReg);
2853       break;
2854     }
2855     case kMips64I16x8Splat: {
2856       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2857       __ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
2858       break;
2859     }
2860     case kMips64I16x8ExtractLaneU: {
2861       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2862       __ copy_u_h(i.OutputRegister(), i.InputSimd128Register(0),
2863                   i.InputInt8(1));
2864       break;
2865     }
2866     case kMips64I16x8ExtractLaneS: {
2867       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2868       __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
2869                   i.InputInt8(1));
2870       break;
2871     }
2872     case kMips64I16x8ReplaceLane: {
2873       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2874       Simd128Register src = i.InputSimd128Register(0);
2875       Simd128Register dst = i.OutputSimd128Register();
2876       if (src != dst) {
2877         __ move_v(dst, src);
2878       }
2879       __ insert_h(dst, i.InputInt8(1), i.InputRegister(2));
2880       break;
2881     }
2882     case kMips64I16x8Neg: {
2883       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2884       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2885       __ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
2886                 i.InputSimd128Register(0));
2887       break;
2888     }
2889     case kMips64I16x8Shl: {
2890       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2891       if (instr->InputAt(1)->IsRegister()) {
2892         __ fill_h(kSimd128ScratchReg, i.InputRegister(1));
2893         __ sll_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2894                  kSimd128ScratchReg);
2895       } else {
2896         __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2897                   i.InputInt4(1));
2898       }
2899       break;
2900     }
2901     case kMips64I16x8ShrS: {
2902       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2903       if (instr->InputAt(1)->IsRegister()) {
2904         __ fill_h(kSimd128ScratchReg, i.InputRegister(1));
2905         __ sra_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2906                  kSimd128ScratchReg);
2907       } else {
2908         __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2909                   i.InputInt4(1));
2910       }
2911       break;
2912     }
2913     case kMips64I16x8ShrU: {
2914       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2915       if (instr->InputAt(1)->IsRegister()) {
2916         __ fill_h(kSimd128ScratchReg, i.InputRegister(1));
2917         __ srl_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2918                  kSimd128ScratchReg);
2919       } else {
2920         __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2921                   i.InputInt4(1));
2922       }
2923       break;
2924     }
2925     case kMips64I16x8Add: {
2926       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2927       __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2928                 i.InputSimd128Register(1));
2929       break;
2930     }
2931     case kMips64I16x8AddSatS: {
2932       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2933       __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2934                   i.InputSimd128Register(1));
2935       break;
2936     }
2937     case kMips64I16x8Sub: {
2938       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2939       __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2940                 i.InputSimd128Register(1));
2941       break;
2942     }
2943     case kMips64I16x8SubSatS: {
2944       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2945       __ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2946                   i.InputSimd128Register(1));
2947       break;
2948     }
2949     case kMips64I16x8Mul: {
2950       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2951       __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2952                 i.InputSimd128Register(1));
2953       break;
2954     }
2955     case kMips64I16x8MaxS: {
2956       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2957       __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2958                  i.InputSimd128Register(1));
2959       break;
2960     }
2961     case kMips64I16x8MinS: {
2962       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2963       __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2964                  i.InputSimd128Register(1));
2965       break;
2966     }
2967     case kMips64I16x8Eq: {
2968       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2969       __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2970                i.InputSimd128Register(1));
2971       break;
2972     }
2973     case kMips64I16x8Ne: {
2974       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2975       Simd128Register dst = i.OutputSimd128Register();
2976       __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
2977       __ nor_v(dst, dst, dst);
2978       break;
2979     }
2980     case kMips64I16x8GtS: {
2981       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2982       __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2983                  i.InputSimd128Register(0));
2984       break;
2985     }
2986     case kMips64I16x8GeS: {
2987       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2988       __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2989                  i.InputSimd128Register(0));
2990       break;
2991     }
2992     case kMips64I16x8AddSatU: {
2993       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2994       __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2995                   i.InputSimd128Register(1));
2996       break;
2997     }
2998     case kMips64I16x8SubSatU: {
2999       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3000       __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3001                   i.InputSimd128Register(1));
3002       break;
3003     }
3004     case kMips64I16x8MaxU: {
3005       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3006       __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3007                  i.InputSimd128Register(1));
3008       break;
3009     }
3010     case kMips64I16x8MinU: {
3011       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3012       __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3013                  i.InputSimd128Register(1));
3014       break;
3015     }
3016     case kMips64I16x8GtU: {
3017       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3018       __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
3019                  i.InputSimd128Register(0));
3020       break;
3021     }
3022     case kMips64I16x8GeU: {
3023       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3024       __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
3025                  i.InputSimd128Register(0));
3026       break;
3027     }
3028     case kMips64I16x8RoundingAverageU: {
3029       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3030       __ aver_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
3031                   i.InputSimd128Register(0));
3032       break;
3033     }
3034     case kMips64I16x8Abs: {
3035       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3036       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3037       __ asub_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3038                   kSimd128RegZero);
3039       break;
3040     }
3041     case kMips64I16x8BitMask: {
3042       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3043       Register dst = i.OutputRegister();
3044       Simd128Register src = i.InputSimd128Register(0);
3045       Simd128Register scratch0 = kSimd128RegZero;
3046       Simd128Register scratch1 = kSimd128ScratchReg;
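      // Same folding scheme as I32x4BitMask, but with eight 16-bit lanes:
      // isolate each lane's sign bit, then OR-fold across halfword, word,
      // and doubleword boundaries until all eight bits land in byte 0.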
3047       __ srli_h(scratch0, src, 15);
3048       __ srli_w(scratch1, scratch0, 15);
3049       __ or_v(scratch0, scratch0, scratch1);
3050       __ srli_d(scratch1, scratch0, 30);
3051       __ or_v(scratch0, scratch0, scratch1);
3052       __ shf_w(scratch1, scratch0, 0x0E);
3053       __ slli_d(scratch1, scratch1, 4);
3054       __ or_v(scratch0, scratch0, scratch1);
3055       __ copy_u_b(dst, scratch0, 0);
3056       break;
3057     }
3058     case kMips64I16x8Q15MulRSatS: {
3059       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3060       __ mulr_q_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
3061                   i.InputSimd128Register(1));
3062       break;
3063     }
3064     case kMips64I8x16Splat: {
3065       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3066       __ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
3067       break;
3068     }
3069     case kMips64I8x16ExtractLaneU: {
3070       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3071       __ copy_u_b(i.OutputRegister(), i.InputSimd128Register(0),
3072                   i.InputInt8(1));
3073       break;
3074     }
3075     case kMips64I8x16ExtractLaneS: {
3076       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3077       __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
3078                   i.InputInt8(1));
3079       break;
3080     }
3081     case kMips64I8x16ReplaceLane: {
3082       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3083       Simd128Register src = i.InputSimd128Register(0);
3084       Simd128Register dst = i.OutputSimd128Register();
3085       if (src != dst) {
3086         __ move_v(dst, src);
3087       }
3088       __ insert_b(dst, i.InputInt8(1), i.InputRegister(2));
3089       break;
3090     }
3091     case kMips64I8x16Neg: {
3092       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3093       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3094       __ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
3095                 i.InputSimd128Register(0));
3096       break;
3097     }
3098     case kMips64I8x16Shl: {
3099       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3100       if (instr->InputAt(1)->IsRegister()) {
3101         __ fill_b(kSimd128ScratchReg, i.InputRegister(1));
3102         __ sll_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3103                  kSimd128ScratchReg);
3104       } else {
3105         __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3106                   i.InputInt3(1));
3107       }
3108       break;
3109     }
3110     case kMips64I8x16ShrS: {
3111       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3112       if (instr->InputAt(1)->IsRegister()) {
3113         __ fill_b(kSimd128ScratchReg, i.InputRegister(1));
3114         __ sra_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3115                  kSimd128ScratchReg);
3116       } else {
3117         __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3118                   i.InputInt3(1));
3119       }
3120       break;
3121     }
3122     case kMips64I8x16Add: {
3123       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3124       __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3125                 i.InputSimd128Register(1));
3126       break;
3127     }
3128     case kMips64I8x16AddSatS: {
3129       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3130       __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3131                   i.InputSimd128Register(1));
3132       break;
3133     }
3134     case kMips64I8x16Sub: {
3135       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3136       __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3137                 i.InputSimd128Register(1));
3138       break;
3139     }
3140     case kMips64I8x16SubSatS: {
3141       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3142       __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3143                   i.InputSimd128Register(1));
3144       break;
3145     }
3146     case kMips64I8x16MaxS: {
3147       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3148       __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3149                  i.InputSimd128Register(1));
3150       break;
3151     }
3152     case kMips64I8x16MinS: {
3153       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3154       __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3155                  i.InputSimd128Register(1));
3156       break;
3157     }
3158     case kMips64I8x16Eq: {
3159       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3160       __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3161                i.InputSimd128Register(1));
3162       break;
3163     }
3164     case kMips64I8x16Ne: {
3165       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3166       Simd128Register dst = i.OutputSimd128Register();
3167       __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
3168       __ nor_v(dst, dst, dst);
3169       break;
3170     }
3171     case kMips64I8x16GtS: {
3172       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3173       __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3174                  i.InputSimd128Register(0));
3175       break;
3176     }
3177     case kMips64I8x16GeS: {
3178       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3179       __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3180                  i.InputSimd128Register(0));
3181       break;
3182     }
3183     case kMips64I8x16ShrU: {
3184       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3185       if (instr->InputAt(1)->IsRegister()) {
3186         __ fill_b(kSimd128ScratchReg, i.InputRegister(1));
3187         __ srl_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3188                  kSimd128ScratchReg);
3189       } else {
3190         __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3191                   i.InputInt3(1));
3192       }
3193       break;
3194     }
3195     case kMips64I8x16AddSatU: {
3196       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3197       __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3198                   i.InputSimd128Register(1));
3199       break;
3200     }
3201     case kMips64I8x16SubSatU: {
3202       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3203       __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3204                   i.InputSimd128Register(1));
3205       break;
3206     }
3207     case kMips64I8x16MaxU: {
3208       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3209       __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3210                  i.InputSimd128Register(1));
3211       break;
3212     }
3213     case kMips64I8x16MinU: {
3214       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3215       __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3216                  i.InputSimd128Register(1));
3217       break;
3218     }
3219     case kMips64I8x16GtU: {
3220       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3221       __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3222                  i.InputSimd128Register(0));
3223       break;
3224     }
3225     case kMips64I8x16GeU: {
3226       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3227       __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3228                  i.InputSimd128Register(0));
3229       break;
3230     }
3231     case kMips64I8x16RoundingAverageU: {
3232       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3233       __ aver_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
3234                   i.InputSimd128Register(0));
3235       break;
3236     }
3237     case kMips64I8x16Abs: {
3238       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3239       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3240       __ asub_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
3241                   kSimd128RegZero);
3242       break;
3243     }
3244     case kMips64I8x16Popcnt: {
3245       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3246       __ pcnt_b(i.OutputSimd128Register(), i.InputSimd128Register(0));
3247       break;
3248     }
3249     case kMips64I8x16BitMask: {
3250       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3251       Register dst = i.OutputRegister();
3252       Simd128Register src = i.InputSimd128Register(0);
3253       Simd128Register scratch0 = kSimd128RegZero;
3254       Simd128Register scratch1 = kSimd128ScratchReg;
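      // As in the narrower BitMask cases, but 16 sign bits do not fit in
      // one byte: after the OR-folding each doubleword holds 8 bits in its
      // low byte, so the two bytes are paired with ilvev_b and read out as
      // a single halfword via copy_u_h.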
3255       __ srli_b(scratch0, src, 7);
3256       __ srli_h(scratch1, scratch0, 7);
3257       __ or_v(scratch0, scratch0, scratch1);
3258       __ srli_w(scratch1, scratch0, 14);
3259       __ or_v(scratch0, scratch0, scratch1);
3260       __ srli_d(scratch1, scratch0, 28);
3261       __ or_v(scratch0, scratch0, scratch1);
3262       __ shf_w(scratch1, scratch0, 0x0E);
3263       __ ilvev_b(scratch0, scratch1, scratch0);
3264       __ copy_u_h(dst, scratch0, 0);
3265       break;
3266     }
3267     case kMips64S128And: {
3268       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3269       __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
3270                i.InputSimd128Register(1));
3271       break;
3272     }
3273     case kMips64S128Or: {
3274       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3275       __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
3276               i.InputSimd128Register(1));
3277       break;
3278     }
3279     case kMips64S128Xor: {
3280       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3281       __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
3282                i.InputSimd128Register(1));
3283       break;
3284     }
3285     case kMips64S128Not: {
3286       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3287       __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
3288                i.InputSimd128Register(0));
3289       break;
3290     }
3291     case kMips64V128AnyTrue: {
3292       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3293       Register dst = i.OutputRegister();
3294       Label all_false;
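      // The li in the branch delay slot executes whether or not the branch
      // is taken: dst is preset to 0, and only if some lane is non-zero
      // does the fall-through li overwrite it with 1.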
3295       __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
3296                    i.InputSimd128Register(0), USE_DELAY_SLOT);
3297       __ li(dst, 0l);  // branch delay slot
3298       __ li(dst, 1);
3299       __ bind(&all_false);
3300       break;
3301     }
3302     case kMips64I64x2AllTrue: {
3303       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3304       Register dst = i.OutputRegister();
3305       Label all_true;
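      // Mirror image of V128AnyTrue: dst is preset to 1 in the delay slot
      // and cleared to 0 only when some lane is zero. The I32x4, I16x8 and
      // I8x16 AllTrue cases below follow the same pattern.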
3306       __ BranchMSA(&all_true, MSA_BRANCH_D, all_not_zero,
3307                    i.InputSimd128Register(0), USE_DELAY_SLOT);
3308       __ li(dst, 1);  // branch delay slot
3309       __ li(dst, 0l);
3310       __ bind(&all_true);
3311       break;
3312     }
3313     case kMips64I32x4AllTrue: {
3314       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3315       Register dst = i.OutputRegister();
3316       Label all_true;
3317       __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
3318                    i.InputSimd128Register(0), USE_DELAY_SLOT);
3319       __ li(dst, 1);  // branch delay slot
3320       __ li(dst, 0l);
3321       __ bind(&all_true);
3322       break;
3323     }
3324     case kMips64I16x8AllTrue: {
3325       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3326       Register dst = i.OutputRegister();
3327       Label all_true;
3328       __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
3329                    i.InputSimd128Register(0), USE_DELAY_SLOT);
3330       __ li(dst, 1);  // branch delay slot
3331       __ li(dst, 0l);
3332       __ bind(&all_true);
3333       break;
3334     }
3335     case kMips64I8x16AllTrue: {
3336       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3337       Register dst = i.OutputRegister();
3338       Label all_true;
3339       __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
3340                    i.InputSimd128Register(0), USE_DELAY_SLOT);
3341       __ li(dst, 1);  // branch delay slot
3342       __ li(dst, 0l);
3343       __ bind(&all_true);
3344       break;
3345     }
3346     case kMips64MsaLd: {
3347       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3348       __ ld_b(i.OutputSimd128Register(), i.MemoryOperand());
3349       break;
3350     }
3351     case kMips64MsaSt: {
3352       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3353       __ st_b(i.InputSimd128Register(2), i.MemoryOperand());
3354       break;
3355     }
3356     case kMips64S32x4InterleaveRight: {
3357       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3358       Simd128Register dst = i.OutputSimd128Register(),
3359                       src0 = i.InputSimd128Register(0),
3360                       src1 = i.InputSimd128Register(1);
3361       // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3362       // dst = [5, 1, 4, 0]
3363       __ ilvr_w(dst, src1, src0);
3364       break;
3365     }
3366     case kMips64S32x4InterleaveLeft: {
3367       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3368       Simd128Register dst = i.OutputSimd128Register(),
3369                       src0 = i.InputSimd128Register(0),
3370                       src1 = i.InputSimd128Register(1);
3371       // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3372       // dst = [7, 3, 6, 2]
3373       __ ilvl_w(dst, src1, src0);
3374       break;
3375     }
3376     case kMips64S32x4PackEven: {
3377       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3378       Simd128Register dst = i.OutputSimd128Register(),
3379                       src0 = i.InputSimd128Register(0),
3380                       src1 = i.InputSimd128Register(1);
3381       // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3382       // dst = [6, 4, 2, 0]
3383       __ pckev_w(dst, src1, src0);
3384       break;
3385     }
3386     case kMips64S32x4PackOdd: {
3387       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3388       Simd128Register dst = i.OutputSimd128Register(),
3389                       src0 = i.InputSimd128Register(0),
3390                       src1 = i.InputSimd128Register(1);
3391       // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3392       // dst = [7, 5, 3, 1]
3393       __ pckod_w(dst, src1, src0);
3394       break;
3395     }
3396     case kMips64S32x4InterleaveEven: {
3397       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3398       Simd128Register dst = i.OutputSimd128Register(),
3399                       src0 = i.InputSimd128Register(0),
3400                       src1 = i.InputSimd128Register(1);
3401       // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3402       // dst = [6, 2, 4, 0]
3403       __ ilvev_w(dst, src1, src0);
3404       break;
3405     }
3406     case kMips64S32x4InterleaveOdd: {
3407       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3408       Simd128Register dst = i.OutputSimd128Register(),
3409                       src0 = i.InputSimd128Register(0),
3410                       src1 = i.InputSimd128Register(1);
3411       // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
3412       // dst = [7, 3, 5, 1]
3413       __ ilvod_w(dst, src1, src0);
3414       break;
3415     }
3416     case kMips64S32x4Shuffle: {
3417       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3418       Simd128Register dst = i.OutputSimd128Register(),
3419                       src0 = i.InputSimd128Register(0),
3420                       src1 = i.InputSimd128Register(1);
3421 
3422       int32_t shuffle = i.InputInt32(2);
3423 
3424       if (src0 == src1) {
3425         // Unary S32x4 shuffles are handled with the shf.w instruction.
3426         unsigned lane = shuffle & 0xFF;
3427         if (FLAG_debug_code) {
3428           // For a unary shuffle, all four lane indices must fall in
3429           // the same range, which can be one of these:
3430           // [0, 3] or [4, 7].
3431           if (lane >= 4) {
3432             int32_t shuffle_helper = shuffle;
3433             for (int i = 0; i < 4; ++i) {
3434               lane = shuffle_helper & 0xFF;
3435               CHECK_GE(lane, 4);
3436               shuffle_helper >>= 8;
3437             }
3438           }
3439         }
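        // Pack the four lane indices (taken mod 4) into the 8-bit shf.w
        // immediate, two bits per destination lane; e.g. the unary shuffle
        // [1, 0, 3, 2] packs to 0b10110001 = 0xB1, the same encoding used
        // by the S16x2/S8x2 reverse cases below.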
3440         uint32_t i8 = 0;
3441         for (int i = 0; i < 4; i++) {
3442           lane = shuffle & 0xFF;
3443           if (lane >= 4) {
3444             lane -= 4;
3445           }
3446           DCHECK_GT(4, lane);
3447           i8 |= lane << (2 * i);
3448           shuffle >>= 8;
3449         }
3450         __ shf_w(dst, src0, i8);
3451       } else {
3452         // Binary shuffles use the vshf.w instruction.
3453         if (dst == src0) {
3454           __ move_v(kSimd128ScratchReg, src0);
3455           src0 = kSimd128ScratchReg;
3456         } else if (dst == src1) {
3457           __ move_v(kSimd128ScratchReg, src1);
3458           src1 = kSimd128ScratchReg;
3459         }
3460 
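        // Build the vshf.w control vector in dst: insert the four packed
        // index bytes into word 0, then zero-extend them byte -> halfword
        // -> word with the two interleaves so that each word lane holds
        // one source lane index for vshf.w.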
3461         __ li(kScratchReg, i.InputInt32(2));
3462         __ insert_w(dst, 0, kScratchReg);
3463         __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3464         __ ilvr_b(dst, kSimd128RegZero, dst);
3465         __ ilvr_h(dst, kSimd128RegZero, dst);
3466         __ vshf_w(dst, src1, src0);
3467       }
3468       break;
3469     }
3470     case kMips64S16x8InterleaveRight: {
3471       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3472       Simd128Register dst = i.OutputSimd128Register(),
3473                       src0 = i.InputSimd128Register(0),
3474                       src1 = i.InputSimd128Register(1);
3475       // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3476       // dst = [11, 3, 10, 2, 9, 1, 8, 0]
3477       __ ilvr_h(dst, src1, src0);
3478       break;
3479     }
3480     case kMips64S16x8InterleaveLeft: {
3481       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3482       Simd128Register dst = i.OutputSimd128Register(),
3483                       src0 = i.InputSimd128Register(0),
3484                       src1 = i.InputSimd128Register(1);
3485       // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3486       // dst = [15, 7, 14, 6, 13, 5, 12, 4]
3487       __ ilvl_h(dst, src1, src0);
3488       break;
3489     }
3490     case kMips64S16x8PackEven: {
3491       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3492       Simd128Register dst = i.OutputSimd128Register(),
3493                       src0 = i.InputSimd128Register(0),
3494                       src1 = i.InputSimd128Register(1);
3495       // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3496       // dst = [14, 12, 10, 8, 6, 4, 2, 0]
3497       __ pckev_h(dst, src1, src0);
3498       break;
3499     }
3500     case kMips64S16x8PackOdd: {
3501       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3502       Simd128Register dst = i.OutputSimd128Register(),
3503                       src0 = i.InputSimd128Register(0),
3504                       src1 = i.InputSimd128Register(1);
3505       // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3506       // dst = [15, 13, 11, 9, 7, 5, 3, 1]
3507       __ pckod_h(dst, src1, src0);
3508       break;
3509     }
3510     case kMips64S16x8InterleaveEven: {
3511       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3512       Simd128Register dst = i.OutputSimd128Register(),
3513                       src0 = i.InputSimd128Register(0),
3514                       src1 = i.InputSimd128Register(1);
3515       // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3516       // dst = [14, 6, 12, 4, 10, 2, 8, 0]
3517       __ ilvev_h(dst, src1, src0);
3518       break;
3519     }
3520     case kMips64S16x8InterleaveOdd: {
3521       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3522       Simd128Register dst = i.OutputSimd128Register(),
3523                       src0 = i.InputSimd128Register(0),
3524                       src1 = i.InputSimd128Register(1);
3525       // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
3526       // dst = [15, 7, ... 11, 3, 9, 1]
3527       __ ilvod_h(dst, src1, src0);
3528       break;
3529     }
3530     case kMips64S16x4Reverse: {
3531       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3532       // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3]
3533       // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
3534       __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
3535       break;
3536     }
3537     case kMips64S16x2Reverse: {
3538       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3539       // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 3, 2, 0, 1]
3540       // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
3541       __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
3542       break;
3543     }
3544     case kMips64S8x16InterleaveRight: {
3545       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3546       Simd128Register dst = i.OutputSimd128Register(),
3547                       src0 = i.InputSimd128Register(0),
3548                       src1 = i.InputSimd128Register(1);
3549       // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3550       // dst = [23, 7, ... 17, 1, 16, 0]
3551       __ ilvr_b(dst, src1, src0);
3552       break;
3553     }
3554     case kMips64S8x16InterleaveLeft: {
3555       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3556       Simd128Register dst = i.OutputSimd128Register(),
3557                       src0 = i.InputSimd128Register(0),
3558                       src1 = i.InputSimd128Register(1);
3559       // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3560       // dst = [31, 15, ... 25, 9, 24, 8]
3561       __ ilvl_b(dst, src1, src0);
3562       break;
3563     }
3564     case kMips64S8x16PackEven: {
3565       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3566       Simd128Register dst = i.OutputSimd128Register(),
3567                       src0 = i.InputSimd128Register(0),
3568                       src1 = i.InputSimd128Register(1);
3569       // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3570       // dst = [30, 28, ... 6, 4, 2, 0]
3571       __ pckev_b(dst, src1, src0);
3572       break;
3573     }
3574     case kMips64S8x16PackOdd: {
3575       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3576       Simd128Register dst = i.OutputSimd128Register(),
3577                       src0 = i.InputSimd128Register(0),
3578                       src1 = i.InputSimd128Register(1);
3579       // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3580       // dst = [31, 29, ... 7, 5, 3, 1]
3581       __ pckod_b(dst, src1, src0);
3582       break;
3583     }
3584     case kMips64S8x16InterleaveEven: {
3585       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3586       Simd128Register dst = i.OutputSimd128Register(),
3587                       src0 = i.InputSimd128Register(0),
3588                       src1 = i.InputSimd128Register(1);
3589       // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3590       // dst = [30, 14, ... 18, 2, 16, 0]
3591       __ ilvev_b(dst, src1, src0);
3592       break;
3593     }
3594     case kMips64S8x16InterleaveOdd: {
3595       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3596       Simd128Register dst = i.OutputSimd128Register(),
3597                       src0 = i.InputSimd128Register(0),
3598                       src1 = i.InputSimd128Register(1);
3599       // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
3600       // dst = [31, 15, ... 19, 3, 17, 1]
3601       __ ilvod_b(dst, src1, src0);
3602       break;
3603     }
3604     case kMips64S8x16Concat: {
3605       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3606       Simd128Register dst = i.OutputSimd128Register();
3607       DCHECK(dst == i.InputSimd128Register(0));
3608       __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
3609       break;
3610     }
3611     case kMips64I8x16Shuffle: {
3612       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3613       Simd128Register dst = i.OutputSimd128Register(),
3614                       src0 = i.InputSimd128Register(0),
3615                       src1 = i.InputSimd128Register(1);
3616 
3617       if (dst == src0) {
3618         __ move_v(kSimd128ScratchReg, src0);
3619         src0 = kSimd128ScratchReg;
3620       } else if (dst == src1) {
3621         __ move_v(kSimd128ScratchReg, src1);
3622         src1 = kSimd128ScratchReg;
3623       }
3624 
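      // The 16 byte lane indices arrive as four 32-bit immediates (inputs
      // 2-5); reassemble them into two 64-bit halves, insert both into
      // dst, and let vshf.b pick each output byte from the src1:src0 pair.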
3625       int64_t control_low =
3626           static_cast<int64_t>(i.InputInt32(3)) << 32 | i.InputInt32(2);
3627       int64_t control_hi =
3628           static_cast<int64_t>(i.InputInt32(5)) << 32 | i.InputInt32(4);
3629       __ li(kScratchReg, control_low);
3630       __ insert_d(dst, 0, kScratchReg);
3631       __ li(kScratchReg, control_hi);
3632       __ insert_d(dst, 1, kScratchReg);
3633       __ vshf_b(dst, src1, src0);
3634       break;
3635     }
3636     case kMips64I8x16Swizzle: {
      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3637       Simd128Register dst = i.OutputSimd128Register(),
3638                       tbl = i.InputSimd128Register(0),
3639                       ctl = i.InputSimd128Register(1);
3640       DCHECK(dst != ctl && dst != tbl);
3641       Simd128Register zeroReg = i.TempSimd128Register(0);
3642       __ xor_v(zeroReg, zeroReg, zeroReg);
3643       __ move_v(dst, ctl);
3644       __ vshf_b(dst, zeroReg, tbl);
3645       break;
3646     }
3647     case kMips64S8x8Reverse: {
3648       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3649       // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
3650       // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7]
3651       // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1
3652       // C: [7, 6, 5, 4] => A': [4, 5, 6, 7]: shf.b imm: 00011011 = 0x1B
3653       __ shf_w(kSimd128ScratchReg, i.InputSimd128Register(0), 0xB1);
3654       __ shf_b(i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B);
3655       break;
3656     }
3657     case kMips64S8x4Reverse: {
3658       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3659       // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3]
3660       // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
3661       __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
3662       break;
3663     }
3664     case kMips64S8x2Reverse: {
3665       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3666       // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 2, 3, 0, 1]
3667       // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
3668       __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
3669       break;
3670     }
3671     case kMips64I32x4SConvertI16x8Low: {
3672       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3673       Simd128Register dst = i.OutputSimd128Register();
3674       Simd128Register src = i.InputSimd128Register(0);
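      // Widen by self-interleave, then sign-extend: ilvr_h places each low
      // halfword in a word slot, and the slli_w/srai_w pair shifts it up
      // and arithmetically back down so the upper 16 bits replicate the
      // sign bit. The High and I16x8 variants below use the same trick.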
3675       __ ilvr_h(kSimd128ScratchReg, src, src);
3676       __ slli_w(dst, kSimd128ScratchReg, 16);
3677       __ srai_w(dst, dst, 16);
3678       break;
3679     }
3680     case kMips64I32x4SConvertI16x8High: {
3681       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3682       Simd128Register dst = i.OutputSimd128Register();
3683       Simd128Register src = i.InputSimd128Register(0);
3684       __ ilvl_h(kSimd128ScratchReg, src, src);
3685       __ slli_w(dst, kSimd128ScratchReg, 16);
3686       __ srai_w(dst, dst, 16);
3687       break;
3688     }
3689     case kMips64I32x4UConvertI16x8Low: {
3690       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3691       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3692       __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero,
3693                 i.InputSimd128Register(0));
3694       break;
3695     }
3696     case kMips64I32x4UConvertI16x8High: {
3697       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3698       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3699       __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero,
3700                 i.InputSimd128Register(0));
3701       break;
3702     }
3703     case kMips64I16x8SConvertI8x16Low: {
3704       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3705       Simd128Register dst = i.OutputSimd128Register();
3706       Simd128Register src = i.InputSimd128Register(0);
3707       __ ilvr_b(kSimd128ScratchReg, src, src);
3708       __ slli_h(dst, kSimd128ScratchReg, 8);
3709       __ srai_h(dst, dst, 8);
3710       break;
3711     }
3712     case kMips64I16x8SConvertI8x16High: {
3713       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3714       Simd128Register dst = i.OutputSimd128Register();
3715       Simd128Register src = i.InputSimd128Register(0);
3716       __ ilvl_b(kSimd128ScratchReg, src, src);
3717       __ slli_h(dst, kSimd128ScratchReg, 8);
3718       __ srai_h(dst, dst, 8);
3719       break;
3720     }
3721     case kMips64I16x8SConvertI32x4: {
3722       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3723       Simd128Register dst = i.OutputSimd128Register();
3724       Simd128Register src0 = i.InputSimd128Register(0);
3725       Simd128Register src1 = i.InputSimd128Register(1);
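      // Saturate both inputs to the signed 16-bit range (sat_s_w with bit
      // position 15), then pack the even, i.e. low, halfwords of the two
      // saturated vectors into dst.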
3726       __ sat_s_w(kSimd128ScratchReg, src0, 15);
3727       __ sat_s_w(kSimd128RegZero, src1, 15);  // kSimd128RegZero as scratch
3728       __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
3729       break;
3730     }
3731     case kMips64I16x8UConvertI32x4: {
3732       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3733       Simd128Register dst = i.OutputSimd128Register();
3734       Simd128Register src0 = i.InputSimd128Register(0);
3735       Simd128Register src1 = i.InputSimd128Register(1);
3736       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3737       __ max_s_w(kSimd128ScratchReg, kSimd128RegZero, src0);
3738       __ sat_u_w(kSimd128ScratchReg, kSimd128ScratchReg, 15);
3739       __ max_s_w(dst, kSimd128RegZero, src1);
3740       __ sat_u_w(dst, dst, 15);
3741       __ pckev_h(dst, dst, kSimd128ScratchReg);
3742       break;
3743     }
3744     case kMips64I16x8UConvertI8x16Low: {
3745       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3746       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3747       __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
3748                 i.InputSimd128Register(0));
3749       break;
3750     }
3751     case kMips64I16x8UConvertI8x16High: {
3752       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3753       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3754       __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
3755                 i.InputSimd128Register(0));
3756       break;
3757     }
3758     case kMips64I8x16SConvertI16x8: {
3759       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3760       Simd128Register dst = i.OutputSimd128Register();
3761       Simd128Register src0 = i.InputSimd128Register(0);
3762       Simd128Register src1 = i.InputSimd128Register(1);
3763       __ sat_s_h(kSimd128ScratchReg, src0, 7);
3764       __ sat_s_h(kSimd128RegZero, src1, 7);  // kSimd128RegZero as scratch
3765       __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
3766       break;
3767     }
3768     case kMips64I8x16UConvertI16x8: {
3769       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3770       Simd128Register dst = i.OutputSimd128Register();
3771       Simd128Register src0 = i.InputSimd128Register(0);
3772       Simd128Register src1 = i.InputSimd128Register(1);
3773       __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3774       __ max_s_h(kSimd128ScratchReg, kSimd128RegZero, src0);
3775       __ sat_u_h(kSimd128ScratchReg, kSimd128ScratchReg, 7);
3776       __ max_s_h(dst, kSimd128RegZero, src1);
3777       __ sat_u_h(dst, dst, 7);
3778       __ pckev_b(dst, dst, kSimd128ScratchReg);
3779       break;
3780     }
3781   }
3782   return kSuccess;
3783 }
3784 
3785 #define UNSUPPORTED_COND(opcode, condition)                                    \
3786   StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
3787                  << "\"";                                                      \
3788   UNIMPLEMENTED();
3789 
3790 void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
3791                             Instruction* instr, FlagsCondition condition,
3792                             Label* tlabel, Label* flabel, bool fallthru) {
3793 #undef __
3794 #define __ tasm->
3795   MipsOperandConverter i(gen, instr);
3796 
3797   Condition cc = kNoCondition;
3798   // MIPS does not have condition code flags, so compare and branch are
3799   // implemented differently than on other architectures. The compare
3800   // operations emit MIPS pseudo-instructions, which are handled here by
3801   // branch instructions that perform the actual comparison. It is
3802   // essential that the input registers of a compare pseudo-op are not
3803   // modified before this branch op, since they are tested here.
3804 
3805   if (instr->arch_opcode() == kMips64Tst) {
3806     cc = FlagsConditionToConditionTst(condition);
3807     __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg));
3808   } else if (instr->arch_opcode() == kMips64Dadd ||
3809              instr->arch_opcode() == kMips64Dsub) {
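    // A 32-bit add/sub was performed in a 64-bit register; it overflowed
    // iff bits 63..32 of the result disagree with the sign extension of
    // bit 31. Extract both views and branch on the requested condition.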
3810     cc = FlagsConditionToConditionOvf(condition);
3811     __ dsra32(kScratchReg, i.OutputRegister(), 0);
3812     __ sra(kScratchReg2, i.OutputRegister(), 31);
3813     __ Branch(tlabel, cc, kScratchReg2, Operand(kScratchReg));
3814   } else if (instr->arch_opcode() == kMips64DaddOvf ||
3815              instr->arch_opcode() == kMips64DsubOvf) {
3816     switch (condition) {
3817       // Overflow occurs if the overflow register is negative.
3818       case kOverflow:
3819         __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg));
3820         break;
3821       case kNotOverflow:
3822         __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg));
3823         break;
3824       default:
3825         UNSUPPORTED_COND(instr->arch_opcode(), condition);
3826     }
3827   } else if (instr->arch_opcode() == kMips64MulOvf) {
3828     // Overflow occurs if the overflow register is not zero.
3829     switch (condition) {
3830       case kOverflow:
3831         __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg));
3832         break;
3833       case kNotOverflow:
3834         __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg));
3835         break;
3836       default:
3837         UNSUPPORTED_COND(kMips64MulOvf, condition);
3838     }
3839   } else if (instr->arch_opcode() == kMips64Cmp) {
3840     cc = FlagsConditionToConditionCmp(condition);
3841     __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
3842   } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
3843     cc = FlagsConditionToConditionCmp(condition);
3844     DCHECK((cc == ls) || (cc == hi));
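    // i.TempRegister(0) holds the boolean result of the stack-limit
    // comparison; for the inverted (ls) condition its low bit is flipped
    // before branching on a non-zero value.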
3845     if (cc == ls) {
3846       __ xori(i.TempRegister(0), i.TempRegister(0), 1);
3847     }
3848     __ Branch(tlabel, ne, i.TempRegister(0), Operand(zero_reg));
3849   } else if (instr->arch_opcode() == kMips64CmpS ||
3850              instr->arch_opcode() == kMips64CmpD) {
3851     bool predicate;
3852     FlagsConditionToConditionCmpFPU(&predicate, condition);
3853     if (predicate) {
3854       __ BranchTrueF(tlabel);
3855     } else {
3856       __ BranchFalseF(tlabel);
3857     }
3858   } else {
3859     PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
3860            instr->arch_opcode());
3861     UNIMPLEMENTED();
3862   }
3863   if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
3864 #undef __
3865 #define __ tasm()->
3866 }
3867 
3868 // Assembles branches after an instruction.
3869 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
3870   Label* tlabel = branch->true_label;
3871   Label* flabel = branch->false_label;
3872 
3873   AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
3874                          branch->fallthru);
3875 }
3876 
3877 #undef UNSUPPORTED_COND
3878 
3879 void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
3880                                             BranchInfo* branch) {
3881   AssembleArchBranch(instr, branch);
3882 }
3883 
3884 void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
3885     RpoNumber target) {
3886   __ Branch(GetLabel(target));
3887 }
3888 
3889 #if V8_ENABLE_WEBASSEMBLY
3890 void CodeGenerator::AssembleArchTrap(Instruction* instr,
3891                                      FlagsCondition condition) {
3892   class OutOfLineTrap final : public OutOfLineCode {
3893    public:
3894     OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
3895         : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
3896     void Generate() final {
3897       MipsOperandConverter i(gen_, instr_);
3898       TrapId trap_id =
3899           static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
3900       GenerateCallToTrap(trap_id);
3901     }
3902 
3903    private:
3904     void GenerateCallToTrap(TrapId trap_id) {
3905       if (trap_id == TrapId::kInvalid) {
3906         // We cannot test calls to the runtime in cctest/test-run-wasm.
3907         // Therefore we emit a call to C here instead of a call to the runtime.
3908         // We use the context register as the scratch register, because we do
3909         // not have a context here.
3910         __ PrepareCallCFunction(0, 0, cp);
3911         __ CallCFunction(
3912             ExternalReference::wasm_call_trap_callback_for_testing(), 0);
3913         __ LeaveFrame(StackFrame::WASM);
3914         auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
3915         int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
3916         pop_count += (pop_count & 1);  // Round up to keep sp 16-byte aligned.
3917         __ Drop(pop_count);
3918         __ Ret();
3919       } else {
3920         gen_->AssembleSourcePosition(instr_);
3921         // A direct call to a wasm runtime stub defined in this module.
3922         // Just encode the stub index. This will be patched when the code
3923         // is added to the native module and copied into wasm code space.
3924         __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
3925         ReferenceMap* reference_map =
3926             gen_->zone()->New<ReferenceMap>(gen_->zone());
3927         gen_->RecordSafepoint(reference_map);
3928         if (FLAG_debug_code) {
3929           __ stop();
3930         }
3931       }
3932     }
3933     Instruction* instr_;
3934     CodeGenerator* gen_;
3935   };
3936   auto ool = zone()->New<OutOfLineTrap>(this, instr);
3937   Label* tlabel = ool->entry();
3938   AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
3939 }
3940 #endif  // V8_ENABLE_WEBASSEMBLY
3941 
3942 // Assembles boolean materializations after an instruction.
3943 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
3944                                         FlagsCondition condition) {
3945   MipsOperandConverter i(this, instr);
3946 
3947   // Materialize a full 32-bit 1 or 0 value. The result register is always the
3948   // last output of the instruction.
3949   DCHECK_NE(0u, instr->OutputCount());
3950   Register result = i.OutputRegister(instr->OutputCount() - 1);
3951   Condition cc = kNoCondition;
3952   // MIPS does not have condition code flags, so compares and branches are
3953   // implemented differently from other architectures. The compare operations
3954   // emit MIPS pseudo-instructions, which are checked and handled here.
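       // (Editorial note) A minimal sketch of the flag-free pattern used
       // throughout this function: to materialize (x == y) the emitted
       // sequence is effectively
       //   Xor  result, x, y        // result == 0 iff x == y
       //   Sltu result, result, 1   // result = (result < 1), i.e. 1 iff equal
       // while (x != y) ends with "Sltu result, zero_reg, result" instead.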
3955 
3956   if (instr->arch_opcode() == kMips64Tst) {
3957     cc = FlagsConditionToConditionTst(condition);
3958     if (cc == eq) {
3959       __ Sltu(result, kScratchReg, 1);
3960     } else {
3961       __ Sltu(result, zero_reg, kScratchReg);
3962     }
3963     return;
3964   } else if (instr->arch_opcode() == kMips64Dadd ||
3965              instr->arch_opcode() == kMips64Dsub) {
3966     cc = FlagsConditionToConditionOvf(condition);
3967     // The overflow check below leaves 1 (overflow) or 0 in result.
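         // (Editorial sketch, assuming the usual 32-bit-in-64-bit-register
         // convention) For kMips64Dadd, INT32_MAX + 1 leaves
         // 0x0000'0000'8000'0000 in the output register: dsrl32(..., 31)
         // extracts bit 63 (the true sign), srl(..., 31) extracts bit 31
         // (the truncated sign), and xor_ yields 1 exactly when they
         // disagree, i.e. when the 32-bit operation overflowed.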
3968     __ dsrl32(kScratchReg, i.OutputRegister(), 31);
3969     __ srl(kScratchReg2, i.OutputRegister(), 31);
3970     __ xor_(result, kScratchReg, kScratchReg2);
3971     if (cc == eq)  // Toggle result for not overflow.
3972       __ xori(result, result, 1);
3973     return;
3974   } else if (instr->arch_opcode() == kMips64DaddOvf ||
3975              instr->arch_opcode() == kMips64DsubOvf) {
3976     // Overflow occurs if overflow register is negative
3977     __ slt(result, kScratchReg, zero_reg);
3978   } else if (instr->arch_opcode() == kMips64MulOvf) {
3979     // Overflow occurs if overflow register is not zero
3980     __ Sgtu(result, kScratchReg, zero_reg);
3981   } else if (instr->arch_opcode() == kMips64Cmp) {
3982     cc = FlagsConditionToConditionCmp(condition);
3983     switch (cc) {
3984       case eq:
3985       case ne: {
3986         Register left = i.InputRegister(0);
3987         Operand right = i.InputOperand(1);
3988         if (instr->InputAt(1)->IsImmediate()) {
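               // Strategy: when -imm fits in 16 bits, test equality by adding
               // -imm and comparing the sum against zero; otherwise XOR the
               // operand in (materializing large immediates via kScratchReg).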
3989           if (is_int16(-right.immediate())) {
3990             if (right.immediate() == 0) {
3991               if (cc == eq) {
3992                 __ Sltu(result, left, 1);
3993               } else {
3994                 __ Sltu(result, zero_reg, left);
3995               }
3996             } else {
3997               __ Daddu(result, left, Operand(-right.immediate()));
3998               if (cc == eq) {
3999                 __ Sltu(result, result, 1);
4000               } else {
4001                 __ Sltu(result, zero_reg, result);
4002               }
4003             }
4004           } else {
4005             if (is_uint16(right.immediate())) {
4006               __ Xor(result, left, right);
4007             } else {
4008               __ li(kScratchReg, right);
4009               __ Xor(result, left, kScratchReg);
4010             }
4011             if (cc == eq) {
4012               __ Sltu(result, result, 1);
4013             } else {
4014               __ Sltu(result, zero_reg, result);
4015             }
4016           }
4017         } else {
4018           __ Xor(result, left, right);
4019           if (cc == eq) {
4020             __ Sltu(result, result, 1);
4021           } else {
4022             __ Sltu(result, zero_reg, result);
4023           }
4024         }
4025       } break;
4026       case lt:
4027       case ge: {
4028         Register left = i.InputRegister(0);
4029         Operand right = i.InputOperand(1);
4030         __ Slt(result, left, right);
4031         if (cc == ge) {
4032           __ xori(result, result, 1);
4033         }
4034       } break;
4035       case gt:
4036       case le: {
4037         Register left = i.InputRegister(1);
4038         Operand right = i.InputOperand(0);
4039         __ Slt(result, left, right);
4040         if (cc == le) {
4041           __ xori(result, result, 1);
4042         }
4043       } break;
4044       case lo:
4045       case hs: {
4046         Register left = i.InputRegister(0);
4047         Operand right = i.InputOperand(1);
4048         __ Sltu(result, left, right);
4049         if (cc == hs) {
4050           __ xori(result, result, 1);
4051         }
4052       } break;
4053       case hi:
4054       case ls: {
4055         Register left = i.InputRegister(1);
4056         Operand right = i.InputOperand(0);
4057         __ Sltu(result, left, right);
4058         if (cc == ls) {
4059           __ xori(result, result, 1);
4060         }
4061       } break;
4062       default:
4063         UNREACHABLE();
4064     }
4065     return;
4066   } else if (instr->arch_opcode() == kMips64CmpD ||
4067              instr->arch_opcode() == kMips64CmpS) {
4068     FPURegister left = i.InputOrZeroDoubleRegister(0);
4069     FPURegister right = i.InputOrZeroDoubleRegister(1);
4070     if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
4071         !__ IsDoubleZeroRegSet()) {
4072       __ Move(kDoubleRegZero, 0.0);
4073     }
4074     bool predicate;
4075     FlagsConditionToConditionCmpFPU(&predicate, condition);
4076     if (kArchVariant != kMips64r6) {
4077       __ li(result, Operand(1));
4078       if (predicate) {
4079         __ Movf(result, zero_reg);
4080       } else {
4081         __ Movt(result, zero_reg);
4082       }
4083     } else {
4084       if (instr->arch_opcode() == kMips64CmpD) {
4085         __ dmfc1(result, kDoubleCompareReg);
4086       } else {
4087         DCHECK_EQ(kMips64CmpS, instr->arch_opcode());
4088         __ mfc1(result, kDoubleCompareReg);
4089       }
4090       if (predicate) {
4091         __ And(result, result, 1);  // cmp returns all 1's/0's, use only LSB.
4092       } else {
4093         __ Addu(result, result, 1);  // cmp result is 0 or -1; +1 flips it to 1 or 0.
4094       }
4095     }
4096     return;
4097   } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
4098     cc = FlagsConditionToConditionCmp(condition);
4099     DCHECK((cc == ls) || (cc == hi));
4100     if (cc == ls) {
4101       __ xori(i.OutputRegister(), i.TempRegister(0), 1);
4102     }
4103     return;
4104   } else {
4105     PrintF("AssembleArchBoolean Unimplemented arch_opcode: %d\n",
4106            instr->arch_opcode());
4107     TRACE_UNIMPL();
4108     UNIMPLEMENTED();
4109   }
4110 }
4111 
4112 void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
4113   MipsOperandConverter i(this, instr);
4114   Register input = i.InputRegister(0);
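       // Inputs: [0] is the value switched on, [1] the default block, followed
       // by (case value, target block) pairs lowered into a binary search.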
4115   std::vector<std::pair<int32_t, Label*>> cases;
4116   for (size_t index = 2; index < instr->InputCount(); index += 2) {
4117     cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
4118   }
4119   AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
4120                                       cases.data() + cases.size());
4121 }
4122 
4123 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
4124   MipsOperandConverter i(this, instr);
4125   Register input = i.InputRegister(0);
4126   size_t const case_count = instr->InputCount() - 2;
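       // The unsigned (hs) compare sends any index >= case_count to the
       // default block at InputRpo(1); in-range indices jump through the
       // inline table generated below.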
4127 
4128   __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
4129   __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
4130     return GetLabel(i.InputRpo(index + 2));
4131   });
4132 }
4133 
4134 void CodeGenerator::AssembleArchSelect(Instruction* instr,
4135                                        FlagsCondition condition) {
4136   UNIMPLEMENTED();
4137 }
4138 
4139 void CodeGenerator::FinishFrame(Frame* frame) {
4140   auto call_descriptor = linkage()->GetIncomingDescriptor();
4141 
4142   const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
4143   if (!saves_fpu.is_empty()) {
4144     int count = saves_fpu.Count();
4145     DCHECK_EQ(kNumCalleeSavedFPU, count);
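         // kDoubleSize == kSystemPointerSize on MIPS64, so this reserves one
         // slot per saved FPU register; the ratio keeps the expression
         // portable.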
4146     frame->AllocateSavedCalleeRegisterSlots(count *
4147                                             (kDoubleSize / kSystemPointerSize));
4148   }
4149 
4150   const RegList saves = call_descriptor->CalleeSavedRegisters();
4151   if (!saves.is_empty()) {
4152     int count = saves.Count();
4153     frame->AllocateSavedCalleeRegisterSlots(count);
4154   }
4155 }
4156 
4157 void CodeGenerator::AssembleConstructFrame() {
4158   auto call_descriptor = linkage()->GetIncomingDescriptor();
4159 
4160   if (frame_access_state()->has_frame()) {
4161     if (call_descriptor->IsCFunctionCall()) {
4162 #if V8_ENABLE_WEBASSEMBLY
4163       if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
4164         __ StubPrologue(StackFrame::C_WASM_ENTRY);
4165         // Reserve stack space for saving the c_entry_fp later.
4166         __ Dsubu(sp, sp, Operand(kSystemPointerSize));
4167 #else
4168       // For brace balance with the #if V8_ENABLE_WEBASSEMBLY branch above.
4169       if (false) {
4170 #endif  // V8_ENABLE_WEBASSEMBLY
4171       } else {
4172         __ Push(ra, fp);
4173         __ mov(fp, sp);
4174       }
4175     } else if (call_descriptor->IsJSFunctionCall()) {
4176       __ Prologue();
4177     } else {
4178       __ StubPrologue(info()->GetOutputStackFrameType());
4179 #if V8_ENABLE_WEBASSEMBLY
4180       if (call_descriptor->IsWasmFunctionCall() ||
4181           call_descriptor->IsWasmImportWrapper() ||
4182           call_descriptor->IsWasmCapiFunction()) {
4183         __ Push(kWasmInstanceRegister);
4184       }
4185       if (call_descriptor->IsWasmCapiFunction()) {
4186         // Reserve space for saving the PC later.
4187         __ Dsubu(sp, sp, Operand(kSystemPointerSize));
4188       }
4189 #endif  // V8_ENABLE_WEBASSEMBLY
4190     }
4191   }
4192 
4193   int required_slots =
4194       frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
4195 
4196   if (info()->is_osr()) {
4197     // TurboFan OSR-compiled functions cannot be entered directly.
4198     __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
4199 
4200     // Unoptimized code jumps directly to this entrypoint while the unoptimized
4201     // frame is still on the stack. Optimized code uses OSR values directly from
4202     // the unoptimized frame. Thus, all that needs to be done is to allocate the
4203     // remaining stack slots.
4204     __ RecordComment("-- OSR entrypoint --");
4205     osr_pc_offset_ = __ pc_offset();
4206     required_slots -= osr_helper()->UnoptimizedFrameSlots();
4207   }
4208 
4209   const RegList saves = call_descriptor->CalleeSavedRegisters();
4210   const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
4211 
4212   if (required_slots > 0) {
4213     DCHECK(frame_access_state()->has_frame());
4214 #if V8_ENABLE_WEBASSEMBLY
4215     if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
4216       // For WebAssembly functions with big frames we have to do the stack
4217       // overflow check before we construct the frame. Otherwise we may not
4218       // have enough space on the stack to call the runtime for the stack
4219       // overflow.
4220       Label done;
4221 
4222       // If the frame is bigger than the stack, we throw the stack overflow
4223       // exception unconditionally, which also avoids an integer-overflow
4224       // check in the comparison below.
4225       if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
4226         __ Ld(kScratchReg,
4227               FieldMemOperand(
4228                   kWasmInstanceRegister,
4229                   WasmInstanceObject::kRealStackLimitAddressOffset));
4230         __ Ld(kScratchReg, MemOperand(kScratchReg));
4231         __ Daddu(kScratchReg, kScratchReg,
4232                  Operand(required_slots * kSystemPointerSize));
4233         __ Branch(&done, uge, sp, Operand(kScratchReg));
4234       }
4235 
4236       __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
4237       // The call does not return, hence we can ignore any references and just
4238       // define an empty safepoint.
4239       ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
4240       RecordSafepoint(reference_map);
4241       if (FLAG_debug_code) __ stop();
4242 
4243       __ bind(&done);
4244     }
4245 #endif  // V8_ENABLE_WEBASSEMBLY
4246   }
4247 
4248   const int returns = frame()->GetReturnSlotCount();
4249 
4250   // Skip callee-saved and return slots, which are pushed below.
4251   required_slots -= saves.Count();
4252   required_slots -= saves_fpu.Count();
4253   required_slots -= returns;
4254   if (required_slots > 0) {
4255     __ Dsubu(sp, sp, Operand(required_slots * kSystemPointerSize));
4256   }
4257 
4258   if (!saves_fpu.is_empty()) {
4259     // Save callee-saved FPU registers.
4260     __ MultiPushFPU(saves_fpu);
4261     DCHECK_EQ(kNumCalleeSavedFPU, saves_fpu.Count());
4262   }
4263 
4264   if (!saves.is_empty()) {
4265     // Save callee-saved registers.
4266     __ MultiPush(saves);
4267   }
4268 
4269   if (returns != 0) {
4270     // Create space for returns.
4271     __ Dsubu(sp, sp, Operand(returns * kSystemPointerSize));
4272   }
4273 }
4274 
4275 void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
4276   auto call_descriptor = linkage()->GetIncomingDescriptor();
4277 
4278   const int returns = frame()->GetReturnSlotCount();
4279   if (returns != 0) {
4280     __ Daddu(sp, sp, Operand(returns * kSystemPointerSize));
4281   }
4282 
4283   // Restore GP registers.
4284   const RegList saves = call_descriptor->CalleeSavedRegisters();
4285   if (!saves.is_empty()) {
4286     __ MultiPop(saves);
4287   }
4288 
4289   // Restore FPU registers.
4290   const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
4291   if (!saves_fpu.is_empty()) {
4292     __ MultiPopFPU(saves_fpu);
4293   }
4294 
4295   MipsOperandConverter g(this, nullptr);
4296 
4297   const int parameter_slots =
4298       static_cast<int>(call_descriptor->ParameterSlotCount());
4299 
4300   // {additional_pop_count} is only greater than zero if {parameter_slots} == 0.
4301   // Check RawMachineAssembler::PopAndReturn.
4302   if (parameter_slots != 0) {
4303     if (additional_pop_count->IsImmediate()) {
4304       DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
4305     } else if (FLAG_debug_code) {
4306       __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
4307                 g.ToRegister(additional_pop_count),
4308                 Operand(static_cast<int64_t>(0)));
4309     }
4310   }
4311 
4312   // Functions with JS linkage have at least one parameter (the receiver).
4313   // If {parameter_slots} == 0, it means it is a builtin with
4314   // kDontAdaptArgumentsSentinel, which pops the JS arguments by
4315   // itself.
4316   const bool drop_jsargs = frame_access_state()->has_frame() &&
4317                            call_descriptor->IsJSFunctionCall() &&
4318                            parameter_slots != 0;
4319 
4320   if (call_descriptor->IsCFunctionCall()) {
4321     AssembleDeconstructFrame();
4322   } else if (frame_access_state()->has_frame()) {
4323     // Canonicalize JSFunction return sites for now, unless they have a variable
4324     // number of stack slot pops.
4325     if (additional_pop_count->IsImmediate() &&
4326         g.ToConstant(additional_pop_count).ToInt32() == 0) {
4327       if (return_label_.is_bound()) {
4328         __ Branch(&return_label_);
4329         return;
4330       } else {
4331         __ bind(&return_label_);
4332       }
4333     }
4334     if (drop_jsargs) {
4335       // Get the actual argument count.
4336       __ Ld(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
4337     }
4338     AssembleDeconstructFrame();
4339   }
4340   if (drop_jsargs) {
4341     // We must pop all arguments from the stack (including the receiver). This
4342     // number of arguments is given by max(1 + argc_reg, parameter_slots).
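         // Branch-free max: slt sets kScratchReg2 iff t0 < parameter_slots,
         // and movn then overwrites t0 with parameter_slots in exactly that
         // case.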
4343     if (parameter_slots > 1) {
4344       __ li(kScratchReg, parameter_slots);
4345       __ slt(kScratchReg2, t0, kScratchReg);
4346       __ movn(t0, kScratchReg, kScratchReg2);
4347     }
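         // Dlsa: sp += t0 << kSystemPointerSizeLog2, i.e. drop t0 slots.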
4348     __ Dlsa(sp, sp, t0, kSystemPointerSizeLog2);
4349   } else if (additional_pop_count->IsImmediate()) {
4350     int additional_count = g.ToConstant(additional_pop_count).ToInt32();
4351     __ Drop(parameter_slots + additional_count);
4352   } else {
4353     Register pop_reg = g.ToRegister(additional_pop_count);
4354     __ Drop(parameter_slots);
4355     __ Dlsa(sp, sp, pop_reg, kSystemPointerSizeLog2);
4356   }
4357   __ Ret();
4358 }
4359 
4360 void CodeGenerator::FinishCode() {}
4361 
4362 void CodeGenerator::PrepareForDeoptimizationExits(
4363     ZoneDeque<DeoptimizationExit*>* exits) {}
4364 
4365 void CodeGenerator::AssembleMove(InstructionOperand* source,
4366                                  InstructionOperand* destination) {
4367   MipsOperandConverter g(this, nullptr);
4368   // Dispatch on the source and destination operand kinds.  Not all
4369   // combinations are possible.
4370   if (source->IsRegister()) {
4371     DCHECK(destination->IsRegister() || destination->IsStackSlot());
4372     Register src = g.ToRegister(source);
4373     if (destination->IsRegister()) {
4374       __ mov(g.ToRegister(destination), src);
4375     } else {
4376       __ Sd(src, g.ToMemOperand(destination));
4377     }
4378   } else if (source->IsStackSlot()) {
4379     DCHECK(destination->IsRegister() || destination->IsStackSlot());
4380     MemOperand src = g.ToMemOperand(source);
4381     if (destination->IsRegister()) {
4382       __ Ld(g.ToRegister(destination), src);
4383     } else {
4384       Register temp = kScratchReg;
4385       __ Ld(temp, src);
4386       __ Sd(temp, g.ToMemOperand(destination));
4387     }
4388   } else if (source->IsConstant()) {
4389     Constant src = g.ToConstant(source);
4390     if (destination->IsRegister() || destination->IsStackSlot()) {
4391       Register dst =
4392           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
4393       switch (src.type()) {
4394         case Constant::kInt32:
4395           __ li(dst, Operand(src.ToInt32()));
4396           break;
4397         case Constant::kFloat32:
4398           __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
4399           break;
4400         case Constant::kInt64:
4401 #if V8_ENABLE_WEBASSEMBLY
4402           if (RelocInfo::IsWasmReference(src.rmode()))
4403             __ li(dst, Operand(src.ToInt64(), src.rmode()));
4404           else
4405 #endif  // V8_ENABLE_WEBASSEMBLY
4406             __ li(dst, Operand(src.ToInt64()));
4407           break;
4408         case Constant::kFloat64:
4409           __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
4410           break;
4411         case Constant::kExternalReference:
4412           __ li(dst, src.ToExternalReference());
4413           break;
4414         case Constant::kDelayedStringConstant:
4415           __ li(dst, src.ToDelayedStringConstant());
4416           break;
4417         case Constant::kHeapObject: {
4418           Handle<HeapObject> src_object = src.ToHeapObject();
4419           RootIndex index;
4420           if (IsMaterializableFromRoot(src_object, &index)) {
4421             __ LoadRoot(dst, index);
4422           } else {
4423             __ li(dst, src_object);
4424           }
4425           break;
4426         }
4427         case Constant::kCompressedHeapObject:
4428           UNREACHABLE();
4429         case Constant::kRpoNumber:
4430           UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips64.
4431       }
4432       if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
4433     } else if (src.type() == Constant::kFloat32) {
4434       if (destination->IsFPStackSlot()) {
4435         MemOperand dst = g.ToMemOperand(destination);
4436         if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
4437           __ Sd(zero_reg, dst);
4438         } else {
4439           __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
4440           __ Sd(kScratchReg, dst);
4441         }
4442       } else {
4443         DCHECK(destination->IsFPRegister());
4444         FloatRegister dst = g.ToSingleRegister(destination);
4445         __ Move(dst, src.ToFloat32());
4446       }
4447     } else {
4448       DCHECK_EQ(Constant::kFloat64, src.type());
4449       DoubleRegister dst = destination->IsFPRegister()
4450                                ? g.ToDoubleRegister(destination)
4451                                : kScratchDoubleReg;
4452       __ Move(dst, src.ToFloat64().value());
4453       if (destination->IsFPStackSlot()) {
4454         __ Sdc1(dst, g.ToMemOperand(destination));
4455       }
4456     }
4457   } else if (source->IsFPRegister()) {
4458     MachineRepresentation rep = LocationOperand::cast(source)->representation();
4459     if (rep == MachineRepresentation::kSimd128) {
4460       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
4461       MSARegister src = g.ToSimd128Register(source);
4462       if (destination->IsSimd128Register()) {
4463         MSARegister dst = g.ToSimd128Register(destination);
4464         __ move_v(dst, src);
4465       } else {
4466         DCHECK(destination->IsSimd128StackSlot());
4467         __ st_b(src, g.ToMemOperand(destination));
4468       }
4469     } else {
4470       FPURegister src = g.ToDoubleRegister(source);
4471       if (destination->IsFPRegister()) {
4472         FPURegister dst = g.ToDoubleRegister(destination);
4473         __ Move(dst, src);
4474       } else {
4475         DCHECK(destination->IsFPStackSlot());
4476         __ Sdc1(src, g.ToMemOperand(destination));
4477       }
4478     }
4479   } else if (source->IsFPStackSlot()) {
4480     DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
4481     MemOperand src = g.ToMemOperand(source);
4482     MachineRepresentation rep = LocationOperand::cast(source)->representation();
4483     if (rep == MachineRepresentation::kSimd128) {
4484       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
4485       if (destination->IsSimd128Register()) {
4486         __ ld_b(g.ToSimd128Register(destination), src);
4487       } else {
4488         DCHECK(destination->IsSimd128StackSlot());
4489         MSARegister temp = kSimd128ScratchReg;
4490         __ ld_b(temp, src);
4491         __ st_b(temp, g.ToMemOperand(destination));
4492       }
4493     } else {
4494       if (destination->IsFPRegister()) {
4495         __ Ldc1(g.ToDoubleRegister(destination), src);
4496       } else {
4497         DCHECK(destination->IsFPStackSlot());
4498         FPURegister temp = kScratchDoubleReg;
4499         __ Ldc1(temp, src);
4500         __ Sdc1(temp, g.ToMemOperand(destination));
4501       }
4502     }
4503   } else {
4504     UNREACHABLE();
4505   }
4506 }
4507 
4508 void CodeGenerator::AssembleSwap(InstructionOperand* source,
4509                                  InstructionOperand* destination) {
4510   MipsOperandConverter g(this, nullptr);
4511   // Dispatch on the source and destination operand kinds.  Not all
4512   // combinations are possible.
4513   if (source->IsRegister()) {
4514     // Register-register.
4515     Register temp = kScratchReg;
4516     Register src = g.ToRegister(source);
4517     if (destination->IsRegister()) {
4518       Register dst = g.ToRegister(destination);
4519       __ Move(temp, src);
4520       __ Move(src, dst);
4521       __ Move(dst, temp);
4522     } else {
4523       DCHECK(destination->IsStackSlot());
4524       MemOperand dst = g.ToMemOperand(destination);
4525       __ mov(temp, src);
4526       __ Ld(src, dst);
4527       __ Sd(temp, dst);
4528     }
4529   } else if (source->IsStackSlot()) {
4530     DCHECK(destination->IsStackSlot());
4531     Register temp_0 = kScratchReg;
4532     Register temp_1 = kScratchReg2;
4533     MemOperand src = g.ToMemOperand(source);
4534     MemOperand dst = g.ToMemOperand(destination);
4535     __ Ld(temp_0, src);
4536     __ Ld(temp_1, dst);
4537     __ Sd(temp_0, dst);
4538     __ Sd(temp_1, src);
4539   } else if (source->IsFPRegister()) {
4540     MachineRepresentation rep = LocationOperand::cast(source)->representation();
4541     if (rep == MachineRepresentation::kSimd128) {
4542       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
4543       MSARegister temp = kSimd128ScratchReg;
4544       MSARegister src = g.ToSimd128Register(source);
4545       if (destination->IsSimd128Register()) {
4546         MSARegister dst = g.ToSimd128Register(destination);
4547         __ move_v(temp, src);
4548         __ move_v(src, dst);
4549         __ move_v(dst, temp);
4550       } else {
4551         DCHECK(destination->IsSimd128StackSlot());
4552         MemOperand dst = g.ToMemOperand(destination);
4553         __ move_v(temp, src);
4554         __ ld_b(src, dst);
4555         __ st_b(temp, dst);
4556       }
4557     } else {
4558       FPURegister temp = kScratchDoubleReg;
4559       FPURegister src = g.ToDoubleRegister(source);
4560       if (destination->IsFPRegister()) {
4561         FPURegister dst = g.ToDoubleRegister(destination);
4562         __ Move(temp, src);
4563         __ Move(src, dst);
4564         __ Move(dst, temp);
4565       } else {
4566         DCHECK(destination->IsFPStackSlot());
4567         MemOperand dst = g.ToMemOperand(destination);
4568         __ Move(temp, src);
4569         __ Ldc1(src, dst);
4570         __ Sdc1(temp, dst);
4571       }
4572     }
4573   } else if (source->IsFPStackSlot()) {
4574     DCHECK(destination->IsFPStackSlot());
4575     Register temp_0 = kScratchReg;
4576     MemOperand src0 = g.ToMemOperand(source);
4577     MemOperand src1(src0.rm(), src0.offset() + kIntSize);
4578     MemOperand dst0 = g.ToMemOperand(destination);
4579     MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
4580     MachineRepresentation rep = LocationOperand::cast(source)->representation();
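         // Both slots are swapped word by word: an FPU/MSA scratch register
         // caches the whole destination while temp_0 copies the source 32
         // bits at a time, and the cached value is stored back over the
         // source.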
4581     if (rep == MachineRepresentation::kSimd128) {
4582       MemOperand src2(src0.rm(), src0.offset() + 2 * kIntSize);
4583       MemOperand src3(src0.rm(), src0.offset() + 3 * kIntSize);
4584       MemOperand dst2(dst0.rm(), dst0.offset() + 2 * kIntSize);
4585       MemOperand dst3(dst0.rm(), dst0.offset() + 3 * kIntSize);
4586       CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
4587       MSARegister temp_1 = kSimd128ScratchReg;
4588       __ ld_b(temp_1, dst0);  // Save destination in temp_1.
4589       __ Lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
4590       __ Sw(temp_0, dst0);
4591       __ Lw(temp_0, src1);
4592       __ Sw(temp_0, dst1);
4593       __ Lw(temp_0, src2);
4594       __ Sw(temp_0, dst2);
4595       __ Lw(temp_0, src3);
4596       __ Sw(temp_0, dst3);
4597       __ st_b(temp_1, src0);
4598     } else {
4599       FPURegister temp_1 = kScratchDoubleReg;
4600       __ Ldc1(temp_1, dst0);  // Save destination in temp_1.
4601       __ Lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
4602       __ Sw(temp_0, dst0);
4603       __ Lw(temp_0, src1);
4604       __ Sw(temp_0, dst1);
4605       __ Sdc1(temp_1, src0);
4606     }
4607   } else {
4608     // No other combinations are possible.
4609     UNREACHABLE();
4610   }
4611 }
4612 
4613 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
4614   // On 64-bit MIPS we emit the jump tables inline.
4615   UNREACHABLE();
4616 }
4617 
4618 #undef ASSEMBLE_ATOMIC_LOAD_INTEGER
4619 #undef ASSEMBLE_ATOMIC_STORE_INTEGER
4620 #undef ASSEMBLE_ATOMIC_BINOP
4621 #undef ASSEMBLE_ATOMIC_BINOP_EXT
4622 #undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
4623 #undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
4624 #undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
4625 #undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
4626 #undef ASSEMBLE_IEEE754_BINOP
4627 #undef ASSEMBLE_IEEE754_UNOP
4628 #undef ASSEMBLE_F64X2_ARITHMETIC_BINOP
4629 
4630 #undef TRACE_MSG
4631 #undef TRACE_UNIMPL
4632 #undef __
4633 
4634 }  // namespace compiler
4635 }  // namespace internal
4636 }  // namespace v8
4637