// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
#define V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_

#include "src/wasm/baseline/liftoff-assembler.h"

#define BAILOUT(reason) bailout("mips64 " reason)

namespace v8 {
namespace internal {
namespace wasm {

namespace liftoff {

// fp-8 holds the stack marker, fp-16 is the instance parameter, first stack
// slot is located at fp-24.
constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
    kConstantStackSpace + LiftoffAssembler::kStackSlotSize;

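// Frame layout sketch, derived from the constants above (each slot is
// kStackSlotSize = 8 bytes):
//   fp -  8 : stack marker
//   fp - 16 : instance parameter (see GetInstanceOperand below)
//   fp - 24 : first Liftoff stack slot, i.e. GetStackSlot(0)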
inline MemOperand GetStackSlot(uint32_t index) {
  int32_t offset = index * LiftoffAssembler::kStackSlotSize;
  return MemOperand(fp, -kFirstStackSlotOffset - offset);
}
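// For illustration: GetStackSlot(0) yields MemOperand(fp, -24) and
// GetStackSlot(1) yields MemOperand(fp, -32).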

inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); }

inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
                 ValueType type) {
  switch (type) {
    case kWasmI32:
      assm->lw(dst.gp(), src);
      break;
    case kWasmI64:
      assm->ld(dst.gp(), src);
      break;
    case kWasmF32:
      assm->lwc1(dst.fp(), src);
      break;
    case kWasmF64:
      assm->Ldc1(dst.fp(), src);
      break;
    default:
      UNREACHABLE();
  }
}

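// Note: unlike Load above, Store below uses the unaligned macro-instructions
// (Usw/Usd/Uswc1/Usdc1, with t8 as scratch for the FP variants), so the
// destination presumably need not be naturally aligned.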
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
                  LiftoffRegister src, ValueType type) {
  MemOperand dst(base, offset);
  switch (type) {
    case kWasmI32:
      assm->Usw(src.gp(), dst);
      break;
    case kWasmI64:
      assm->Usd(src.gp(), dst);
      break;
    case kWasmF32:
      assm->Uswc1(src.fp(), dst, t8);
      break;
    case kWasmF64:
      assm->Usdc1(src.fp(), dst, t8);
      break;
    default:
      UNREACHABLE();
  }
}

inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
  switch (type) {
    case kWasmI32:
    case kWasmI64:
      assm->push(reg.gp());
      break;
    case kWasmF32:
      assm->daddiu(sp, sp, -kPointerSize);
      assm->swc1(reg.fp(), MemOperand(sp, 0));
      break;
    case kWasmF64:
      assm->daddiu(sp, sp, -kPointerSize);
      assm->Sdc1(reg.fp(), MemOperand(sp, 0));
      break;
    default:
      UNREACHABLE();
  }
}

#if defined(V8_TARGET_BIG_ENDIAN)
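// On big-endian targets the bytes of a value loaded from or stored to wasm
// memory arrive in the wrong order, so they are swapped here. Floats take a
// detour through a GP register via the reinterpret conversions. A rough
// reading of the sub-word cases below: the 2-/4-byte swap leaves the result
// in the upper word, and the following dsrl32/dsra32 shifts it back into the
// low 32 bits with zero- resp. sign-extension.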
inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
                                 LoadType type, LiftoffRegList pinned) {
  bool is_float = false;
  LiftoffRegister tmp = dst;
  switch (type.value()) {
    case LoadType::kI64Load8U:
    case LoadType::kI64Load8S:
    case LoadType::kI32Load8U:
    case LoadType::kI32Load8S:
      // No need to change endianness for byte size.
      return;
    case LoadType::kF32Load:
      is_float = true;
      tmp = assm->GetUnusedRegister(kGpReg, pinned);
      assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
      V8_FALLTHROUGH;
    case LoadType::kI64Load32U:
      assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
      assm->dsrl32(tmp.gp(), tmp.gp(), 0);
      break;
    case LoadType::kI32Load:
    case LoadType::kI64Load32S:
      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
      assm->dsra32(tmp.gp(), tmp.gp(), 0);
      break;
    case LoadType::kI32Load16S:
    case LoadType::kI64Load16S:
      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
      assm->dsra32(tmp.gp(), tmp.gp(), 0);
      break;
    case LoadType::kI32Load16U:
    case LoadType::kI64Load16U:
      assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
      assm->dsrl32(tmp.gp(), tmp.gp(), 0);
      break;
    case LoadType::kF64Load:
      is_float = true;
      tmp = assm->GetUnusedRegister(kGpReg, pinned);
      assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
      V8_FALLTHROUGH;
    case LoadType::kI64Load:
      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
      break;
    default:
      UNREACHABLE();
  }

  if (is_float) {
    switch (type.value()) {
      case LoadType::kF32Load:
        assm->emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
        break;
      case LoadType::kF64Load:
        assm->emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
        break;
      default:
        UNREACHABLE();
    }
  }
}

inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
                                  StoreType type, LiftoffRegList pinned) {
  bool is_float = false;
  LiftoffRegister tmp = src;
  switch (type.value()) {
    case StoreType::kI64Store8:
    case StoreType::kI32Store8:
      // No need to change endianness for byte size.
      return;
    case StoreType::kF32Store:
      is_float = true;
      tmp = assm->GetUnusedRegister(kGpReg, pinned);
      assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
      V8_FALLTHROUGH;
    case StoreType::kI32Store:
    case StoreType::kI32Store16:
      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
      break;
    case StoreType::kF64Store:
      is_float = true;
      tmp = assm->GetUnusedRegister(kGpReg, pinned);
      assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
      V8_FALLTHROUGH;
    case StoreType::kI64Store:
    case StoreType::kI64Store32:
    case StoreType::kI64Store16:
      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
      break;
    default:
      UNREACHABLE();
  }

  if (is_float) {
    switch (type.value()) {
      case StoreType::kF32Store:
        assm->emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
        break;
      case StoreType::kF64Store:
        assm->emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
        break;
      default:
        UNREACHABLE();
    }
  }
}
#endif  // V8_TARGET_BIG_ENDIAN

}  // namespace liftoff

int LiftoffAssembler::PrepareStackFrame() {
  int offset = pc_offset();
  // When the constant representing the stack frame size can't be encoded as a
  // 16-bit immediate, three instructions are needed to add it to sp, so
  // reserve space for that case.
  daddiu(sp, sp, 0);
  nop();
  nop();
  return offset;
}

void LiftoffAssembler::PatchPrepareStackFrame(int offset,
                                              uint32_t stack_slots) {
  uint64_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
  DCHECK_LE(bytes, kMaxInt);
  // We can't run out of space, just pass anything big enough to not cause the
  // assembler to try to grow the buffer.
  constexpr int kAvailableSpace = 256;
  TurboAssembler patching_assembler(nullptr, AssemblerOptions{},
                                    buffer_ + offset, kAvailableSpace,
                                    CodeObjectRequired::kNo);
  // If {bytes} fits in a 16-bit immediate, a single daddiu is generated and
  // the two nops stay untouched. Otherwise, a lui/ori sequence loads the
  // constant into a register and a daddu is generated as the third
  // instruction.
  patching_assembler.Daddu(sp, sp, Operand(-bytes));
}
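// Sketch of the two patched sequences (assuming the usual MIPS64 macro
// expansions; 'at' is the assembler temporary):
//   small frame:  daddiu sp, sp, -bytes
//                 nop
//                 nop
//   large frame:  lui    at, hi(-bytes)
//                 ori    at, at, lo(-bytes)
//                 daddu  sp, sp, at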

void LiftoffAssembler::FinishCode() {}

void LiftoffAssembler::AbortCompilation() {}

void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
                                    RelocInfo::Mode rmode) {
  switch (value.type()) {
    case kWasmI32:
      TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
      break;
    case kWasmI64:
      TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
      break;
    case kWasmF32:
      TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
      break;
    case kWasmF64:
      TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
      break;
    default:
      UNREACHABLE();
  }
}

void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
                                        int size) {
  DCHECK_LE(offset, kMaxInt);
  ld(dst, liftoff::GetInstanceOperand());
  DCHECK(size == 4 || size == 8);
  if (size == 4) {
    lw(dst, MemOperand(dst, offset));
  } else {
    ld(dst, MemOperand(dst, offset));
  }
}

void LiftoffAssembler::SpillInstance(Register instance) {
  sd(instance, liftoff::GetInstanceOperand());
}

void LiftoffAssembler::FillInstanceInto(Register dst) {
  ld(dst, liftoff::GetInstanceOperand());
}

void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                            Register offset_reg, uint32_t offset_imm,
                            LoadType type, LiftoffRegList pinned,
                            uint32_t* protected_load_pc, bool is_load_mem) {
  MemOperand src_op(src_addr, offset_imm);
  if (offset_reg != no_reg) {
    Register src = GetUnusedRegister(kGpReg, pinned).gp();
    emit_ptrsize_add(src, src_addr, offset_reg);
    src_op = MemOperand(src, offset_imm);
  }

  if (protected_load_pc) *protected_load_pc = pc_offset();
  switch (type.value()) {
    case LoadType::kI32Load8U:
    case LoadType::kI64Load8U:
      lbu(dst.gp(), src_op);
      break;
    case LoadType::kI32Load8S:
    case LoadType::kI64Load8S:
      lb(dst.gp(), src_op);
      break;
    case LoadType::kI32Load16U:
    case LoadType::kI64Load16U:
      TurboAssembler::Ulhu(dst.gp(), src_op);
      break;
    case LoadType::kI32Load16S:
    case LoadType::kI64Load16S:
      TurboAssembler::Ulh(dst.gp(), src_op);
      break;
    case LoadType::kI64Load32U:
      TurboAssembler::Ulwu(dst.gp(), src_op);
      break;
    case LoadType::kI32Load:
    case LoadType::kI64Load32S:
      TurboAssembler::Ulw(dst.gp(), src_op);
      break;
    case LoadType::kI64Load:
      TurboAssembler::Uld(dst.gp(), src_op);
      break;
    case LoadType::kF32Load:
      TurboAssembler::Ulwc1(dst.fp(), src_op, t8);
      break;
    case LoadType::kF64Load:
      TurboAssembler::Uldc1(dst.fp(), src_op, t8);
      break;
    default:
      UNREACHABLE();
  }

#if defined(V8_TARGET_BIG_ENDIAN)
  if (is_load_mem) {
    liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
  }
#endif
}

void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
                             uint32_t offset_imm, LiftoffRegister src,
                             StoreType type, LiftoffRegList pinned,
                             uint32_t* protected_store_pc, bool is_store_mem) {
  Register dst = no_reg;
  if (offset_reg != no_reg) {
    dst = GetUnusedRegister(kGpReg, pinned).gp();
    emit_ptrsize_add(dst, dst_addr, offset_reg);
  }
  MemOperand dst_op = (offset_reg != no_reg) ? MemOperand(dst, offset_imm)
                                             : MemOperand(dst_addr, offset_imm);

#if defined(V8_TARGET_BIG_ENDIAN)
  if (is_store_mem) {
    LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned);
    // Save original value.
    Move(tmp, src, type.value_type());

    src = tmp;
    pinned.set(tmp);
    liftoff::ChangeEndiannessStore(this, src, type, pinned);
  }
#endif

  if (protected_store_pc) *protected_store_pc = pc_offset();
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8:
      sb(src.gp(), dst_op);
      break;
    case StoreType::kI32Store16:
    case StoreType::kI64Store16:
      TurboAssembler::Ush(src.gp(), dst_op, t8);
      break;
    case StoreType::kI32Store:
    case StoreType::kI64Store32:
      TurboAssembler::Usw(src.gp(), dst_op);
      break;
    case StoreType::kI64Store:
      TurboAssembler::Usd(src.gp(), dst_op);
      break;
    case StoreType::kF32Store:
      TurboAssembler::Uswc1(src.fp(), dst_op, t8);
      break;
    case StoreType::kF64Store:
      TurboAssembler::Usdc1(src.fp(), dst_op, t8);
      break;
    default:
      UNREACHABLE();
  }
}

void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                           uint32_t caller_slot_idx,
                                           ValueType type) {
  MemOperand src(fp, kPointerSize * (caller_slot_idx + 1));
  liftoff::Load(this, dst, src, type);
}
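// For illustration: caller_slot_idx 0 reads from MemOperand(fp, kPointerSize),
// i.e. fp + 8 on mips64; higher indices move further up the caller's frame.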

void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
                                      ValueType type) {
  DCHECK_NE(dst_index, src_index);
  LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
  Fill(reg, src_index, type);
  Spill(dst_index, reg, type);
}

void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
  DCHECK_NE(dst, src);
  // TODO(ksreten): Handle different sizes here.
  TurboAssembler::Move(dst, src);
}

void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
                            ValueType type) {
  DCHECK_NE(dst, src);
  TurboAssembler::Move(dst, src);
}

void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
                             ValueType type) {
  RecordUsedSpillSlot(index);
  MemOperand dst = liftoff::GetStackSlot(index);
  switch (type) {
    case kWasmI32:
      sw(reg.gp(), dst);
      break;
    case kWasmI64:
      sd(reg.gp(), dst);
      break;
    case kWasmF32:
      swc1(reg.fp(), dst);
      break;
    case kWasmF64:
      TurboAssembler::Sdc1(reg.fp(), dst);
      break;
    default:
      UNREACHABLE();
  }
}

void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
  RecordUsedSpillSlot(index);
  MemOperand dst = liftoff::GetStackSlot(index);
  switch (value.type()) {
    case kWasmI32: {
      LiftoffRegister tmp = GetUnusedRegister(kGpReg);
      TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
      sw(tmp.gp(), dst);
      break;
    }
    case kWasmI64: {
      LiftoffRegister tmp = GetUnusedRegister(kGpReg);
      TurboAssembler::li(tmp.gp(), value.to_i64());
      sd(tmp.gp(), dst);
      break;
    }
    default:
      // kWasmF32 and kWasmF64 are unreachable, since those
      // constants are not tracked.
      UNREACHABLE();
  }
}

void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
                            ValueType type) {
  MemOperand src = liftoff::GetStackSlot(index);
  switch (type) {
    case kWasmI32:
      lw(reg.gp(), src);
      break;
    case kWasmI64:
      ld(reg.gp(), src);
      break;
    case kWasmF32:
      lwc1(reg.fp(), src);
      break;
    case kWasmF64:
      TurboAssembler::Ldc1(reg.fp(), src);
      break;
    default:
      UNREACHABLE();
  }
}

void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
  UNREACHABLE();
}

void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
  TurboAssembler::Mul(dst, lhs, rhs);
}

void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));

  // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
  TurboAssembler::li(kScratchReg, 1);
  TurboAssembler::li(kScratchReg2, 1);
  TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
  TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
  daddu(kScratchReg, kScratchReg, kScratchReg2);
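  // kScratchReg was zeroed iff lhs == kMinInt and kScratchReg2 iff rhs == -1,
  // so the sum is zero exactly when both conditions hold.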
  TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
                         Operand(zero_reg));

  TurboAssembler::Div(dst, lhs, rhs);
}

void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero) {
  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
  TurboAssembler::Divu(dst, lhs, rhs);
}

void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero) {
  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
  TurboAssembler::Mod(dst, lhs, rhs);
}

void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero) {
  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
  TurboAssembler::Modu(dst, lhs, rhs);
}

#define I32_BINOP(name, instruction)                                 \
  void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
                                         Register rhs) {             \
    instruction(dst, lhs, rhs);                                      \
  }

// clang-format off
I32_BINOP(add, addu)
I32_BINOP(sub, subu)
I32_BINOP(and, and_)
I32_BINOP(or, or_)
I32_BINOP(xor, xor_)
// clang-format on

#undef I32_BINOP

bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
  TurboAssembler::Clz(dst, src);
  return true;
}

bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
  TurboAssembler::Ctz(dst, src);
  return true;
}

bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
  TurboAssembler::Popcnt(dst, src);
  return true;
}

#define I32_SHIFTOP(name, instruction)                                      \
  void LiftoffAssembler::emit_i32_##name(                                   \
      Register dst, Register src, Register amount, LiftoffRegList pinned) { \
    instruction(dst, src, amount);                                          \
  }

I32_SHIFTOP(shl, sllv)
I32_SHIFTOP(sar, srav)
I32_SHIFTOP(shr, srlv)

#undef I32_SHIFTOP

void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  TurboAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp());
}

bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));

  // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
  TurboAssembler::li(kScratchReg, 1);
  TurboAssembler::li(kScratchReg2, 1);
  TurboAssembler::LoadZeroOnCondition(
      kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq);
  TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
  daddu(kScratchReg, kScratchReg, kScratchReg2);
  TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
                         Operand(zero_reg));

  TurboAssembler::Ddiv(dst.gp(), lhs.gp(), rhs.gp());
  return true;
}

bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
  TurboAssembler::Ddivu(dst.gp(), lhs.gp(), rhs.gp());
  return true;
}

bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
  TurboAssembler::Dmod(dst.gp(), lhs.gp(), rhs.gp());
  return true;
}

bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
  TurboAssembler::Dmodu(dst.gp(), lhs.gp(), rhs.gp());
  return true;
}

#define I64_BINOP(name, instruction)                                   \
  void LiftoffAssembler::emit_i64_##name(                              \
      LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
    instruction(dst.gp(), lhs.gp(), rhs.gp());                         \
  }

// clang-format off
I64_BINOP(add, daddu)
I64_BINOP(sub, dsubu)
I64_BINOP(and, and_)
I64_BINOP(or, or_)
I64_BINOP(xor, xor_)
// clang-format on

#undef I64_BINOP

#define I64_SHIFTOP(name, instruction)                                         \
  void LiftoffAssembler::emit_i64_##name(LiftoffRegister dst,                  \
                                         LiftoffRegister src, Register amount, \
                                         LiftoffRegList pinned) {              \
    instruction(dst.gp(), src.gp(), amount);                                   \
  }

I64_SHIFTOP(shl, dsllv)
I64_SHIFTOP(sar, dsrav)
I64_SHIFTOP(shr, dsrlv)

#undef I64_SHIFTOP

void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
  addu(dst, src, zero_reg);
}
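// Note: on MIPS64, 32-bit arithmetic like the addu above yields a
// sign-extended 64-bit result, which is exactly the pointer-sized value
// needed here.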

void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
  TurboAssembler::Neg_s(dst, src);
}

void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
  TurboAssembler::Neg_d(dst, src);
}

void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  Label ool, done;
  TurboAssembler::Float32Min(dst, lhs, rhs, &ool);
  Branch(&done);

  bind(&ool);
  TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs);
  bind(&done);
}

void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  Label ool, done;
  TurboAssembler::Float32Max(dst, lhs, rhs, &ool);
  Branch(&done);

  bind(&ool);
  TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
  bind(&done);
}

void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  Label ool, done;
  TurboAssembler::Float64Min(dst, lhs, rhs, &ool);
  Branch(&done);

  bind(&ool);
  TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs);
  bind(&done);
}

void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  Label ool, done;
  TurboAssembler::Float64Max(dst, lhs, rhs, &ool);
  Branch(&done);

  bind(&ool);
  TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
  bind(&done);
}

#define FP_BINOP(name, instruction)                                          \
  void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
                                     DoubleRegister rhs) {                   \
    instruction(dst, lhs, rhs);                                              \
  }
#define FP_UNOP(name, instruction)                                             \
  void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
    instruction(dst, src);                                                     \
  }
#define FP_UNOP_RETURN_TRUE(name, instruction)                                 \
  bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
    instruction(dst, src);                                                     \
    return true;                                                               \
  }

FP_BINOP(f32_add, add_s)
FP_BINOP(f32_sub, sub_s)
FP_BINOP(f32_mul, mul_s)
FP_BINOP(f32_div, div_s)
FP_UNOP(f32_abs, abs_s)
FP_UNOP(f32_ceil, Ceil_s_s)
FP_UNOP(f32_floor, Floor_s_s)
FP_UNOP(f32_trunc, Trunc_s_s)
FP_UNOP(f32_nearest_int, Round_s_s)
FP_UNOP(f32_sqrt, sqrt_s)
FP_BINOP(f64_add, add_d)
FP_BINOP(f64_sub, sub_d)
FP_BINOP(f64_mul, mul_d)
FP_BINOP(f64_div, div_d)
FP_UNOP(f64_abs, abs_d)
FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d_d)
FP_UNOP_RETURN_TRUE(f64_floor, Floor_d_d)
FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d_d)
FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d_d)
FP_UNOP(f64_sqrt, sqrt_d)

#undef FP_BINOP
#undef FP_UNOP
#undef FP_UNOP_RETURN_TRUE

bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
                                            LiftoffRegister dst,
                                            LiftoffRegister src, Label* trap) {
  switch (opcode) {
    case kExprI32ConvertI64:
      TurboAssembler::Ext(dst.gp(), src.gp(), 0, 32);
      return true;
    case kExprI32SConvertF32: {
      LiftoffRegister rounded =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
      LiftoffRegister converted_back =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));

      // Real conversion.
      TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
      trunc_w_s(kScratchDoubleReg, rounded.fp());
      mfc1(dst.gp(), kScratchDoubleReg);
      // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
      // because INT32_MIN allows easier out-of-bounds detection.
      TurboAssembler::Addu(kScratchReg, dst.gp(), 1);
      TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
      TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);

      // Check whether the conversion needs to trap.
      mtc1(dst.gp(), kScratchDoubleReg);
      cvt_s_w(converted_back.fp(), kScratchDoubleReg);
      TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
      TurboAssembler::BranchFalseF(trap);
      return true;
    }
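    // The pattern above repeats for the remaining float-to-int cases:
    // truncate the input, convert the truncated result back, and trap if the
    // round trip does not compare equal (which catches NaN and out-of-range
    // inputs).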
    case kExprI32UConvertF32: {
      LiftoffRegister rounded =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
      LiftoffRegister converted_back =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));

      // Real conversion.
      TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
      TurboAssembler::Trunc_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
      // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
      // because 0 allows easier out-of-bounds detection.
      TurboAssembler::Addu(kScratchReg, dst.gp(), 1);
      TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg);

      // Check whether the conversion needs to trap.
      TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp());
      cvt_s_d(converted_back.fp(), converted_back.fp());
      TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
      TurboAssembler::BranchFalseF(trap);
      return true;
    }
    case kExprI32SConvertF64: {
      LiftoffRegister rounded =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
      LiftoffRegister converted_back =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));

      // Real conversion.
      TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
      trunc_w_d(kScratchDoubleReg, rounded.fp());
      mfc1(dst.gp(), kScratchDoubleReg);

      // Check whether the conversion needs to trap.
      cvt_d_w(converted_back.fp(), kScratchDoubleReg);
      TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
      TurboAssembler::BranchFalseF(trap);
      return true;
    }
    case kExprI32UConvertF64: {
      LiftoffRegister rounded =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
      LiftoffRegister converted_back =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));

      // Real conversion.
      TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
      TurboAssembler::Trunc_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);

      // Check whether the conversion needs to trap.
      TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp());
      TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
      TurboAssembler::BranchFalseF(trap);
      return true;
    }
    case kExprI32ReinterpretF32:
      TurboAssembler::FmoveLow(dst.gp(), src.fp());
      return true;
    case kExprI64SConvertI32:
      sll(dst.gp(), src.gp(), 0);
      return true;
    case kExprI64UConvertI32:
      TurboAssembler::Dext(dst.gp(), src.gp(), 0, 32);
      return true;
    case kExprI64SConvertF32: {
      LiftoffRegister rounded =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
      LiftoffRegister converted_back =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));

      // Real conversion.
      TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
      trunc_l_s(kScratchDoubleReg, rounded.fp());
      dmfc1(dst.gp(), kScratchDoubleReg);
      // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
      // because INT64_MIN allows easier out-of-bounds detection.
      TurboAssembler::Daddu(kScratchReg, dst.gp(), 1);
      TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
      TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);

      // Check whether the conversion needs to trap.
      dmtc1(dst.gp(), kScratchDoubleReg);
      cvt_s_l(converted_back.fp(), kScratchDoubleReg);
      TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
      TurboAssembler::BranchFalseF(trap);
      return true;
    }
    case kExprI64UConvertF32: {
      // Real conversion.
      TurboAssembler::Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg,
                                 kScratchReg);

      // Check whether the conversion needs to trap.
      TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
      return true;
    }
    case kExprI64SConvertF64: {
      LiftoffRegister rounded =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
      LiftoffRegister converted_back =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));

      // Real conversion.
      TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
      trunc_l_d(kScratchDoubleReg, rounded.fp());
      dmfc1(dst.gp(), kScratchDoubleReg);
      // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
      // because INT64_MIN allows easier out-of-bounds detection.
      TurboAssembler::Daddu(kScratchReg, dst.gp(), 1);
      TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
      TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);

      // Check whether the conversion needs to trap.
      dmtc1(dst.gp(), kScratchDoubleReg);
      cvt_d_l(converted_back.fp(), kScratchDoubleReg);
      TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
      TurboAssembler::BranchFalseF(trap);
      return true;
    }
    case kExprI64UConvertF64: {
      // Real conversion.
      TurboAssembler::Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg,
                                 kScratchReg);

      // Check whether the conversion needs to trap.
      TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
      return true;
    }
    case kExprI64ReinterpretF64:
      dmfc1(dst.gp(), src.fp());
      return true;
    case kExprF32SConvertI32: {
      LiftoffRegister scratch =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
      mtc1(src.gp(), scratch.fp());
      cvt_s_w(dst.fp(), scratch.fp());
      return true;
    }
    case kExprF32UConvertI32:
      TurboAssembler::Cvt_s_uw(dst.fp(), src.gp());
      return true;
    case kExprF32ConvertF64:
      cvt_s_d(dst.fp(), src.fp());
      return true;
    case kExprF32ReinterpretI32:
      TurboAssembler::FmoveLow(dst.fp(), src.gp());
      return true;
    case kExprF64SConvertI32: {
      LiftoffRegister scratch =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
      mtc1(src.gp(), scratch.fp());
      cvt_d_w(dst.fp(), scratch.fp());
      return true;
    }
    case kExprF64UConvertI32:
      TurboAssembler::Cvt_d_uw(dst.fp(), src.gp());
      return true;
    case kExprF64ConvertF32:
      cvt_d_s(dst.fp(), src.fp());
      return true;
    case kExprF64ReinterpretI64:
      dmtc1(src.gp(), dst.fp());
      return true;
    default:
      return false;
  }
}

void LiftoffAssembler::emit_jump(Label* label) {
  TurboAssembler::Branch(label);
}

void LiftoffAssembler::emit_jump(Register target) {
  TurboAssembler::Jump(target);
}

void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
                                      ValueType type, Register lhs,
                                      Register rhs) {
  if (rhs != no_reg) {
    TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
  } else {
    TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
  }
}

void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
  sltiu(dst, src, 1);
}

void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
                                         Register lhs, Register rhs) {
  Register tmp = dst;
  if (dst == lhs || dst == rhs) {
    tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
  }
  // Write 1 as the result.
  TurboAssembler::li(tmp, 1);

  // If the negated condition is true, write 0 as the result.
  Condition neg_cond = NegateCondition(cond);
  TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);

  // If tmp != dst, the result is moved into dst.
  TurboAssembler::Move(dst, tmp);
}

void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
  sltiu(dst, src.gp(), 1);
}

void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
                                         LiftoffRegister lhs,
                                         LiftoffRegister rhs) {
  Register tmp = dst;
  if (dst == lhs.gp() || dst == rhs.gp()) {
    tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
  }
  // Write 1 as the result.
  TurboAssembler::li(tmp, 1);

  // If the negated condition is true, write 0 as the result.
  Condition neg_cond = NegateCondition(cond);
  TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
                                      neg_cond);

  // If tmp != dst, the result is moved into dst.
  TurboAssembler::Move(dst, tmp);
}

namespace liftoff {

inline FPUCondition ConditionToConditionCmpFPU(bool& predicate,
                                               Condition condition) {
  switch (condition) {
    case kEqual:
      predicate = true;
      return EQ;
    case kUnequal:
      predicate = false;
      return EQ;
    case kUnsignedLessThan:
      predicate = true;
      return OLT;
    case kUnsignedGreaterEqual:
      predicate = false;
      return OLT;
    case kUnsignedLessEqual:
      predicate = true;
      return OLE;
    case kUnsignedGreaterThan:
      predicate = false;
      return OLE;
    default:
      predicate = true;
      break;
  }
  UNREACHABLE();
}
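// {predicate} == true means the FPU condition flag directly holds the result;
// false means the caller must use the inverted flag (e.g. kUnequal is tested
// via EQ and then negated), as done in emit_f32_set_cond below.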

}  // namespace liftoff

void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  Label not_nan, cont;
  TurboAssembler::CompareIsNanF32(lhs, rhs);
  TurboAssembler::BranchFalseF(&not_nan);
  // If one of the operands is NaN, return 1 for f32.ne, else 0.
  if (cond == ne) {
    TurboAssembler::li(dst, 1);
  } else {
    TurboAssembler::Move(dst, zero_reg);
  }
  TurboAssembler::Branch(&cont);

  bind(&not_nan);

  TurboAssembler::li(dst, 1);
  bool predicate;
  FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
  TurboAssembler::CompareF32(fcond, lhs, rhs);
  if (predicate) {
    TurboAssembler::LoadZeroIfNotFPUCondition(dst);
  } else {
    TurboAssembler::LoadZeroIfFPUCondition(dst);
  }

  bind(&cont);
}

void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  Label not_nan, cont;
  TurboAssembler::CompareIsNanF64(lhs, rhs);
  TurboAssembler::BranchFalseF(&not_nan);
  // If one of the operands is NaN, return 1 for f64.ne, else 0.
  if (cond == ne) {
    TurboAssembler::li(dst, 1);
  } else {
    TurboAssembler::Move(dst, zero_reg);
  }
  TurboAssembler::Branch(&cont);

  bind(&not_nan);

  TurboAssembler::li(dst, 1);
  bool predicate;
  FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
  TurboAssembler::CompareF64(fcond, lhs, rhs);
  if (predicate) {
    TurboAssembler::LoadZeroIfNotFPUCondition(dst);
  } else {
    TurboAssembler::LoadZeroIfFPUCondition(dst);
  }

  bind(&cont);
}

void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
  TurboAssembler::Uld(limit_address, MemOperand(limit_address));
  TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
}

void LiftoffAssembler::CallTrapCallbackForTesting() {
  PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
  CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}

void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
  if (emit_debug_code()) Abort(reason);
}

void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
  LiftoffRegList gp_regs = regs & kGpCacheRegList;
  unsigned num_gp_regs = gp_regs.GetNumRegsSet();
  if (num_gp_regs) {
    unsigned offset = num_gp_regs * kPointerSize;
    daddiu(sp, sp, -offset);
    while (!gp_regs.is_empty()) {
      LiftoffRegister reg = gp_regs.GetFirstRegSet();
      offset -= kPointerSize;
      sd(reg.gp(), MemOperand(sp, offset));
      gp_regs.clear(reg);
    }
    DCHECK_EQ(offset, 0);
  }
  LiftoffRegList fp_regs = regs & kFpCacheRegList;
  unsigned num_fp_regs = fp_regs.GetNumRegsSet();
  if (num_fp_regs) {
    daddiu(sp, sp, -(num_fp_regs * kStackSlotSize));
    unsigned offset = 0;
    while (!fp_regs.is_empty()) {
      LiftoffRegister reg = fp_regs.GetFirstRegSet();
      TurboAssembler::Sdc1(reg.fp(), MemOperand(sp, offset));
      fp_regs.clear(reg);
      offset += sizeof(double);
    }
    DCHECK_EQ(offset, num_fp_regs * sizeof(double));
  }
}

void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
  LiftoffRegList fp_regs = regs & kFpCacheRegList;
  unsigned fp_offset = 0;
  while (!fp_regs.is_empty()) {
    LiftoffRegister reg = fp_regs.GetFirstRegSet();
    TurboAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset));
    fp_regs.clear(reg);
    fp_offset += sizeof(double);
  }
  if (fp_offset) daddiu(sp, sp, fp_offset);
  LiftoffRegList gp_regs = regs & kGpCacheRegList;
  unsigned gp_offset = 0;
  while (!gp_regs.is_empty()) {
    LiftoffRegister reg = gp_regs.GetLastRegSet();
    ld(reg.gp(), MemOperand(sp, gp_offset));
    gp_regs.clear(reg);
    gp_offset += kPointerSize;
  }
  daddiu(sp, sp, gp_offset);
}
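// Note the pop order mirrors the push order: PushRegisters stores GP
// registers from the first set bit downwards in memory, so PopRegisters walks
// from the last set bit (GetLastRegSet) to reload them from ascending stack
// offsets.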

void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
  DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize);  // 16 bit immediate
  TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}

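// In short: CallC packs the arguments into a freshly allocated stack buffer,
// passes a pointer to that buffer as the single C argument, and reads results
// back from the return register and/or the buffer afterwards.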
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
                             const LiftoffRegister* args,
                             const LiftoffRegister* rets,
                             ValueType out_argument_type, int stack_bytes,
                             ExternalReference ext_ref) {
  daddiu(sp, sp, -stack_bytes);

  int arg_bytes = 0;
  for (ValueType param_type : sig->parameters()) {
    liftoff::Store(this, sp, arg_bytes, *args++, param_type);
    arg_bytes += ValueTypes::MemSize(param_type);
  }
  DCHECK_LE(arg_bytes, stack_bytes);

  // Pass a pointer to the buffer with the arguments to the C function.
  // On mips, the first argument is passed in {a0}.
  constexpr Register kFirstArgReg = a0;
  mov(kFirstArgReg, sp);

  // Now call the C function.
  constexpr int kNumCCallArgs = 1;
  PrepareCallCFunction(kNumCCallArgs, kScratchReg);
  CallCFunction(ext_ref, kNumCCallArgs);

  // Move return value to the right register.
  const LiftoffRegister* next_result_reg = rets;
  if (sig->return_count() > 0) {
    DCHECK_EQ(1, sig->return_count());
    constexpr Register kReturnReg = v0;
    if (kReturnReg != next_result_reg->gp()) {
      Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
    }
    ++next_result_reg;
  }

  // Load potential output value from the buffer on the stack.
  if (out_argument_type != kWasmStmt) {
    liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_type);
  }

  daddiu(sp, sp, stack_bytes);
}

void LiftoffAssembler::CallNativeWasmCode(Address addr) {
  Call(addr, RelocInfo::WASM_CALL);
}

void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                    compiler::CallDescriptor* call_descriptor,
                                    Register target) {
  if (target == no_reg) {
    pop(kScratchReg);
    Call(kScratchReg);
  } else {
    Call(target);
  }
}

void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
  // A direct call to a wasm runtime stub defined in this module.
  // Just encode the stub index. This will be patched at relocation.
  Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
}

void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
  daddiu(sp, sp, -size);
  TurboAssembler::Move(addr, sp);
}

void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
  daddiu(sp, sp, size);
}

void LiftoffStackSlots::Construct() {
  for (auto& slot : slots_) {
    const LiftoffAssembler::VarState& src = slot.src_;
    switch (src.loc()) {
      case LiftoffAssembler::VarState::kStack:
        asm_->ld(kScratchReg, liftoff::GetStackSlot(slot.src_index_));
        asm_->push(kScratchReg);
        break;
      case LiftoffAssembler::VarState::kRegister:
        liftoff::push(asm_, src.reg(), src.type());
        break;
      case LiftoffAssembler::VarState::KIntConst: {
        asm_->li(kScratchReg, Operand(src.i32_const()));
        asm_->push(kScratchReg);
        break;
      }
    }
  }
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8

#undef BAILOUT

#endif  // V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_