// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
#define V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_

#include "src/wasm/baseline/liftoff-assembler.h"

#define BAILOUT(reason) bailout("mips " reason)

namespace v8 {
namespace internal {
namespace wasm {

namespace liftoff {

// fp-4 holds the stack marker, fp-8 is the instance parameter, first stack
// slot is located at fp-16.
constexpr int32_t kConstantStackSpace = 8;
constexpr int32_t kFirstStackSlotOffset =
    kConstantStackSpace + LiftoffAssembler::kStackSlotSize;

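// Slot i thus lives at fp - (kFirstStackSlotOffset + i * kStackSlotSize);
// e.g. with LiftoffAssembler::kStackSlotSize == 8 (wide enough for an
// i64/f64), GetStackSlot(0) is fp-16 and GetStackSlot(1) is fp-24.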
inline MemOperand GetStackSlot(uint32_t index) {
  int32_t offset = index * LiftoffAssembler::kStackSlotSize;
  return MemOperand(fp, -kFirstStackSlotOffset - offset);
}

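// An i64 spilled to slot i occupies two word-sized halves: half_index 2*i
// addresses the low word (same address as GetStackSlot(i)) and 2*i+1 the
// high word.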
inline MemOperand GetHalfStackSlot(uint32_t half_index) {
  int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
  return MemOperand(fp, -kFirstStackSlotOffset - offset);
}

inline MemOperand GetInstanceOperand() { return MemOperand(fp, -8); }

inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
                 int32_t offset, ValueType type) {
  MemOperand src(base, offset);
  switch (type) {
    case kWasmI32:
      assm->lw(dst.gp(), src);
      break;
    case kWasmI64:
      assm->lw(dst.low_gp(), src);
      assm->lw(dst.high_gp(), MemOperand(base, offset + 4));
      break;
    case kWasmF32:
      assm->lwc1(dst.fp(), src);
      break;
    case kWasmF64:
      assm->Ldc1(dst.fp(), src);
      break;
    default:
      UNREACHABLE();
  }
}

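// Unlike Load above, Store uses the unaligned-access macros (Usw, Uswc1,
// Usdc1, with t8 as scratch where needed), so it also works for
// destinations that are not naturally aligned.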
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
                  LiftoffRegister src, ValueType type) {
  MemOperand dst(base, offset);
  switch (type) {
    case kWasmI32:
      assm->Usw(src.gp(), dst);
      break;
    case kWasmI64:
      assm->Usw(src.low_gp(), dst);
      assm->Usw(src.high_gp(), MemOperand(base, offset + 4));
      break;
    case kWasmF32:
      assm->Uswc1(src.fp(), dst, t8);
      break;
    case kWasmF64:
      assm->Usdc1(src.fp(), dst, t8);
      break;
    default:
      UNREACHABLE();
  }
}

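// Pushes a value onto the machine stack. For kWasmI64 the high word is
// pushed first so the low word ends up at the lower address; floats have no
// push instruction, so sp is adjusted manually before the store.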
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
  switch (type) {
    case kWasmI32:
      assm->push(reg.gp());
      break;
    case kWasmI64:
      assm->Push(reg.high_gp(), reg.low_gp());
      break;
    case kWasmF32:
      assm->addiu(sp, sp, -sizeof(float));
      assm->swc1(reg.fp(), MemOperand(sp, 0));
      break;
    case kWasmF64:
      assm->addiu(sp, sp, -sizeof(double));
      assm->Sdc1(reg.fp(), MemOperand(sp, 0));
      break;
    default:
      UNREACHABLE();
  }
}

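// On big-endian targets, values loaded from or stored to (little-endian)
// wasm memory must be byte-swapped. Float values take a detour through a gp
// register (via the reinterpret conversions) to be swapped there.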
#if defined(V8_TARGET_BIG_ENDIAN)
inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
                                 LoadType type, LiftoffRegList pinned) {
  bool is_float = false;
  LiftoffRegister tmp = dst;
  switch (type.value()) {
    case LoadType::kI64Load8U:
    case LoadType::kI64Load8S:
      // Swap low and high registers.
      assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
      assm->TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
      assm->TurboAssembler::Move(tmp.high_gp(), kScratchReg);
      V8_FALLTHROUGH;
    case LoadType::kI32Load8U:
    case LoadType::kI32Load8S:
      // No need to change endianness for byte size.
      return;
    case LoadType::kF32Load:
      is_float = true;
      tmp = assm->GetUnusedRegister(kGpReg, pinned);
      assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
      V8_FALLTHROUGH;
    case LoadType::kI32Load:
      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
      break;
    case LoadType::kI32Load16S:
      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
      break;
    case LoadType::kI32Load16U:
      assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
      break;
    case LoadType::kF64Load:
      is_float = true;
      tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
      assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
      V8_FALLTHROUGH;
    case LoadType::kI64Load:
      assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
      assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
      assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
      break;
    case LoadType::kI64Load16U:
      assm->TurboAssembler::ByteSwapUnsigned(tmp.low_gp(), tmp.high_gp(), 2);
      assm->TurboAssembler::Move(tmp.high_gp(), zero_reg);
      break;
    case LoadType::kI64Load16S:
      assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 2);
      assm->sra(tmp.high_gp(), tmp.high_gp(), 31);
      break;
    case LoadType::kI64Load32U:
      assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
      assm->TurboAssembler::Move(tmp.high_gp(), zero_reg);
      break;
    case LoadType::kI64Load32S:
      assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
      assm->sra(tmp.high_gp(), tmp.high_gp(), 31);
      break;
    default:
      UNREACHABLE();
  }

  if (is_float) {
    switch (type.value()) {
      case LoadType::kF32Load:
        assm->emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
        break;
      case LoadType::kF64Load:
        assm->emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
        break;
      default:
        UNREACHABLE();
    }
  }
}

inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
                                  StoreType type, LiftoffRegList pinned) {
  bool is_float = false;
  LiftoffRegister tmp = src;
  switch (type.value()) {
    case StoreType::kI64Store8:
      // Swap low and high registers.
      assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
      assm->TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
      assm->TurboAssembler::Move(tmp.high_gp(), kScratchReg);
      V8_FALLTHROUGH;
    case StoreType::kI32Store8:
      // No need to change endianness for byte size.
      return;
    case StoreType::kF32Store:
      is_float = true;
      tmp = assm->GetUnusedRegister(kGpReg, pinned);
      assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
      V8_FALLTHROUGH;
    case StoreType::kI32Store:
    case StoreType::kI32Store16:
      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
      break;
    case StoreType::kF64Store:
      is_float = true;
      tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
      assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
      V8_FALLTHROUGH;
    case StoreType::kI64Store:
    case StoreType::kI64Store32:
    case StoreType::kI64Store16:
      assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
      assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
      assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
      break;
    default:
      UNREACHABLE();
  }

  if (is_float) {
    switch (type.value()) {
      case StoreType::kF32Store:
        assm->emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
        break;
      case StoreType::kF64Store:
        assm->emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
        break;
      default:
        UNREACHABLE();
    }
  }
}
#endif  // V8_TARGET_BIG_ENDIAN

}  // namespace liftoff

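// Frame setup happens in two steps: PrepareStackFrame reserves space for
// the setup code while the final frame size is still unknown, and
// PatchPrepareStackFrame later overwrites the reserved instructions with
// the actual stack adjustment.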
int LiftoffAssembler::PrepareStackFrame() {
  int offset = pc_offset();
  // When the constant that represents the size of the stack frame can't be
  // represented as a 16-bit immediate, we need three instructions to add it
  // to sp, so we reserve space for that case.
  addiu(sp, sp, 0);
  nop();
  nop();
  return offset;
}

void LiftoffAssembler::PatchPrepareStackFrame(int offset,
                                              uint32_t stack_slots) {
  uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
  DCHECK_LE(bytes, kMaxInt);
  // We can't run out of space, just pass anything big enough to not cause the
  // assembler to try to grow the buffer.
  constexpr int kAvailableSpace = 256;
  TurboAssembler patching_assembler(nullptr, AssemblerOptions{},
                                    buffer_ + offset, kAvailableSpace,
                                    CodeObjectRequired::kNo);
  // If {bytes} can be represented as a 16-bit immediate, a single addiu is
  // generated and the two nops stay untouched. Otherwise, a lui-ori sequence
  // loads the constant into a register and an addu is generated as the third
  // instruction.
  patching_assembler.Addu(sp, sp, Operand(-bytes));
}

void LiftoffAssembler::FinishCode() {}

void LiftoffAssembler::AbortCompilation() {}

void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
                                    RelocInfo::Mode rmode) {
  switch (value.type()) {
    case kWasmI32:
      TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
      break;
    case kWasmI64: {
      DCHECK(RelocInfo::IsNone(rmode));
      int32_t low_word = value.to_i64();
      int32_t high_word = value.to_i64() >> 32;
      TurboAssembler::li(reg.low_gp(), Operand(low_word));
      TurboAssembler::li(reg.high_gp(), Operand(high_word));
      break;
    }
    case kWasmF32:
      TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
      break;
    case kWasmF64:
      TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
      break;
    default:
      UNREACHABLE();
  }
}

void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
                                        int size) {
  DCHECK_LE(offset, kMaxInt);
  lw(dst, liftoff::GetInstanceOperand());
  DCHECK_EQ(4, size);
  lw(dst, MemOperand(dst, offset));
}

void LiftoffAssembler::SpillInstance(Register instance) {
  sw(instance, liftoff::GetInstanceOperand());
}

void LiftoffAssembler::FillInstanceInto(Register dst) {
  lw(dst, liftoff::GetInstanceOperand());
}

void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                            Register offset_reg, uint32_t offset_imm,
                            LoadType type, LiftoffRegList pinned,
                            uint32_t* protected_load_pc, bool is_load_mem) {
  Register src = no_reg;
  if (offset_reg != no_reg) {
    src = GetUnusedRegister(kGpReg, pinned).gp();
    emit_ptrsize_add(src, src_addr, offset_reg);
  }
  MemOperand src_op = (offset_reg != no_reg) ? MemOperand(src, offset_imm)
                                             : MemOperand(src_addr, offset_imm);

  if (protected_load_pc) *protected_load_pc = pc_offset();
  switch (type.value()) {
    case LoadType::kI32Load8U:
      lbu(dst.gp(), src_op);
      break;
    case LoadType::kI64Load8U:
      lbu(dst.low_gp(), src_op);
      xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
      break;
    case LoadType::kI32Load8S:
      lb(dst.gp(), src_op);
      break;
    case LoadType::kI64Load8S:
      lb(dst.low_gp(), src_op);
      TurboAssembler::Move(dst.high_gp(), dst.low_gp());
      sra(dst.high_gp(), dst.high_gp(), 31);
      break;
    case LoadType::kI32Load16U:
      TurboAssembler::Ulhu(dst.gp(), src_op);
      break;
    case LoadType::kI64Load16U:
      TurboAssembler::Ulhu(dst.low_gp(), src_op);
      xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
      break;
    case LoadType::kI32Load16S:
      TurboAssembler::Ulh(dst.gp(), src_op);
      break;
    case LoadType::kI64Load16S:
      TurboAssembler::Ulh(dst.low_gp(), src_op);
      TurboAssembler::Move(dst.high_gp(), dst.low_gp());
      sra(dst.high_gp(), dst.high_gp(), 31);
      break;
    case LoadType::kI32Load:
      TurboAssembler::Ulw(dst.gp(), src_op);
      break;
    case LoadType::kI64Load32U:
      TurboAssembler::Ulw(dst.low_gp(), src_op);
      xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
      break;
    case LoadType::kI64Load32S:
      TurboAssembler::Ulw(dst.low_gp(), src_op);
      TurboAssembler::Move(dst.high_gp(), dst.low_gp());
      sra(dst.high_gp(), dst.high_gp(), 31);
      break;
    case LoadType::kI64Load: {
      MemOperand src_op_upper = (offset_reg != no_reg)
                                    ? MemOperand(src, offset_imm + 4)
                                    : MemOperand(src_addr, offset_imm + 4);
      TurboAssembler::Ulw(dst.high_gp(), src_op_upper);
      TurboAssembler::Ulw(dst.low_gp(), src_op);
      break;
    }
    case LoadType::kF32Load:
      TurboAssembler::Ulwc1(dst.fp(), src_op, t8);
      break;
    case LoadType::kF64Load:
      TurboAssembler::Uldc1(dst.fp(), src_op, t8);
      break;
    default:
      UNREACHABLE();
  }

#if defined(V8_TARGET_BIG_ENDIAN)
  if (is_load_mem) {
    liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
  }
#endif
}

void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
                             uint32_t offset_imm, LiftoffRegister src,
                             StoreType type, LiftoffRegList pinned,
                             uint32_t* protected_store_pc, bool is_store_mem) {
  Register dst = no_reg;
  if (offset_reg != no_reg) {
    dst = GetUnusedRegister(kGpReg, pinned).gp();
    emit_ptrsize_add(dst, dst_addr, offset_reg);
  }
  MemOperand dst_op = (offset_reg != no_reg) ? MemOperand(dst, offset_imm)
                                             : MemOperand(dst_addr, offset_imm);

#if defined(V8_TARGET_BIG_ENDIAN)
  if (is_store_mem) {
    LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned);
    // Save the original value.
    Move(tmp, src, type.value_type());

    src = tmp;
    pinned.set(tmp);
    liftoff::ChangeEndiannessStore(this, src, type, pinned);
  }
#endif

  if (protected_store_pc) *protected_store_pc = pc_offset();
  switch (type.value()) {
    case StoreType::kI64Store8:
      src = src.low();
      V8_FALLTHROUGH;
    case StoreType::kI32Store8:
      sb(src.gp(), dst_op);
      break;
    case StoreType::kI64Store16:
      src = src.low();
      V8_FALLTHROUGH;
    case StoreType::kI32Store16:
      TurboAssembler::Ush(src.gp(), dst_op, t8);
      break;
    case StoreType::kI64Store32:
      src = src.low();
      V8_FALLTHROUGH;
    case StoreType::kI32Store:
      TurboAssembler::Usw(src.gp(), dst_op);
      break;
    case StoreType::kI64Store: {
      MemOperand dst_op_upper = (offset_reg != no_reg)
                                    ? MemOperand(dst, offset_imm + 4)
                                    : MemOperand(dst_addr, offset_imm + 4);
      TurboAssembler::Usw(src.high_gp(), dst_op_upper);
      TurboAssembler::Usw(src.low_gp(), dst_op);
      break;
    }
    case StoreType::kF32Store:
      TurboAssembler::Uswc1(src.fp(), dst_op, t8);
      break;
    case StoreType::kF64Store:
      TurboAssembler::Usdc1(src.fp(), dst_op, t8);
      break;
    default:
      UNREACHABLE();
  }
}

void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                           uint32_t caller_slot_idx,
                                           ValueType type) {
  int32_t offset = kPointerSize * (caller_slot_idx + 1);
  liftoff::Load(this, dst, fp, offset, type);
}

void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
                                      ValueType type) {
  DCHECK_NE(dst_index, src_index);
  LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
  Fill(reg, src_index, type);
  Spill(dst_index, reg, type);
}

void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
  DCHECK_NE(dst, src);
  TurboAssembler::mov(dst, src);
}

void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
                            ValueType type) {
  DCHECK_NE(dst, src);
  TurboAssembler::Move(dst, src);
}

void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
                             ValueType type) {
  RecordUsedSpillSlot(index);
  MemOperand dst = liftoff::GetStackSlot(index);
  switch (type) {
    case kWasmI32:
      sw(reg.gp(), dst);
      break;
    case kWasmI64:
      sw(reg.low_gp(), dst);
      sw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
      break;
    case kWasmF32:
      swc1(reg.fp(), dst);
      break;
    case kWasmF64:
      TurboAssembler::Sdc1(reg.fp(), dst);
      break;
    default:
      UNREACHABLE();
  }
}

void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
  RecordUsedSpillSlot(index);
  MemOperand dst = liftoff::GetStackSlot(index);
  switch (value.type()) {
    case kWasmI32: {
      LiftoffRegister tmp = GetUnusedRegister(kGpReg);
      TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
      sw(tmp.gp(), dst);
      break;
    }
    case kWasmI64: {
      LiftoffRegister tmp = GetUnusedRegister(kGpRegPair);

      int32_t low_word = value.to_i64();
      int32_t high_word = value.to_i64() >> 32;
      TurboAssembler::li(tmp.low_gp(), Operand(low_word));
      TurboAssembler::li(tmp.high_gp(), Operand(high_word));

      sw(tmp.low_gp(), dst);
      sw(tmp.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
      break;
    }
    default:
      // kWasmF32 and kWasmF64 are unreachable, since those
      // constants are not tracked.
      UNREACHABLE();
  }
}

void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
                            ValueType type) {
  MemOperand src = liftoff::GetStackSlot(index);
  switch (type) {
    case kWasmI32:
      lw(reg.gp(), src);
      break;
    case kWasmI64:
      lw(reg.low_gp(), src);
      lw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
      break;
    case kWasmF32:
      lwc1(reg.fp(), src);
      break;
    case kWasmF64:
      TurboAssembler::Ldc1(reg.fp(), src);
      break;
    default:
      UNREACHABLE();
  }
}

void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
  lw(reg, liftoff::GetHalfStackSlot(half_index));
}

void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
  TurboAssembler::Mul(dst, lhs, rhs);
}

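// i32.div_s traps on division by zero and on the single unrepresentable
// case, kMinInt / -1. The check below is branchless: each
// LoadZeroOnCondition clears its scratch register when its condition holds,
// so the sum of the two scratches is zero only in the trapping case.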
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));

  // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
  TurboAssembler::li(kScratchReg, 1);
  TurboAssembler::li(kScratchReg2, 1);
  TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
  TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
  addu(kScratchReg, kScratchReg, kScratchReg2);
  TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
                         Operand(zero_reg));

  TurboAssembler::Div(dst, lhs, rhs);
}

void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero) {
  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
  TurboAssembler::Divu(dst, lhs, rhs);
}

void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero) {
  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
  TurboAssembler::Mod(dst, lhs, rhs);
}

void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero) {
  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
  TurboAssembler::Modu(dst, lhs, rhs);
}

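// The macro below expands to the simple register-register binops, e.g.
// emit_i32_add(dst, lhs, rhs) simply becomes addu(dst, lhs, rhs).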
#define I32_BINOP(name, instruction)                                 \
  void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
                                         Register rhs) {             \
    instruction(dst, lhs, rhs);                                      \
  }

// clang-format off
I32_BINOP(add, addu)
I32_BINOP(sub, subu)
I32_BINOP(and, and_)
I32_BINOP(or, or_)
I32_BINOP(xor, xor_)
// clang-format on

#undef I32_BINOP

bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
  TurboAssembler::Clz(dst, src);
  return true;
}

bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
  TurboAssembler::Ctz(dst, src);
  return true;
}

bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
  TurboAssembler::Popcnt(dst, src);
  return true;
}

#define I32_SHIFTOP(name, instruction)                                      \
  void LiftoffAssembler::emit_i32_##name(                                   \
      Register dst, Register src, Register amount, LiftoffRegList pinned) { \
    instruction(dst, src, amount);                                          \
  }

I32_SHIFTOP(shl, sllv)
I32_SHIFTOP(sar, srav)
I32_SHIFTOP(shr, srlv)

#undef I32_SHIFTOP

void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  TurboAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
                          lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
                          kScratchReg, kScratchReg2);
}

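// There is no compact instruction sequence for 64-bit division on mips32;
// returning false here signals the caller to use a fallback implementation
// (a call to a C helper) for these opcodes.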
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  return false;
}

bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  return false;
}

bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  return false;
}

bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  return false;
}

void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
                          lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
                          kScratchReg, kScratchReg2);
}

void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  TurboAssembler::SubPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
                          lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
                          kScratchReg, kScratchReg2);
}

namespace liftoff {

inline bool IsRegInRegPair(LiftoffRegister pair, Register reg) {
  DCHECK(pair.is_pair());
  return pair.low_gp() == reg || pair.high_gp() == reg;
}

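// Emits one of TurboAssembler's 64-bit pair shifts (ShlPair, SarPair,
// ShrPair). {emit_shift} writes (dst_low, dst_high) from (src_low,
// src_high) shifted by {amount}, using two scratch registers.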
inline void Emit64BitShiftOperation(
    LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src,
    Register amount,
    void (TurboAssembler::*emit_shift)(Register, Register, Register, Register,
                                       Register, Register, Register),
    LiftoffRegList pinned) {
  Label move, done;
  pinned.set(dst);
  pinned.set(src);
  pinned.set(amount);

  // If some of the destination registers are in use, get another, unused
  // pair. That way we prevent overwriting some input registers while
  // shifting. Do this before any branch so that the cache state will be
  // correct for all conditions.
  LiftoffRegister tmp = assm->GetUnusedRegister(kGpRegPair, pinned);

  // If the shift amount is 0, don't do the shifting.
  assm->TurboAssembler::Branch(&move, eq, amount, Operand(zero_reg));

  if (liftoff::IsRegInRegPair(dst, amount) || dst.overlaps(src)) {
    // Do the actual shift.
    (assm->*emit_shift)(tmp.low_gp(), tmp.high_gp(), src.low_gp(),
                        src.high_gp(), amount, kScratchReg, kScratchReg2);

    // Place the result in the destination register.
    assm->TurboAssembler::Move(dst.high_gp(), tmp.high_gp());
    assm->TurboAssembler::Move(dst.low_gp(), tmp.low_gp());
  } else {
    (assm->*emit_shift)(dst.low_gp(), dst.high_gp(), src.low_gp(),
                        src.high_gp(), amount, kScratchReg, kScratchReg2);
  }
  assm->TurboAssembler::Branch(&done);

  // If the shift amount is 0, move src to dst.
  assm->bind(&move);
  assm->TurboAssembler::Move(dst.high_gp(), src.high_gp());
  assm->TurboAssembler::Move(dst.low_gp(), src.low_gp());

  assm->bind(&done);
}
}  // namespace liftoff

void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
                                    Register amount, LiftoffRegList pinned) {
  liftoff::Emit64BitShiftOperation(this, dst, src, amount,
                                   &TurboAssembler::ShlPair, pinned);
}

void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
                                    Register amount, LiftoffRegList pinned) {
  liftoff::Emit64BitShiftOperation(this, dst, src, amount,
                                   &TurboAssembler::SarPair, pinned);
}

void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
                                    Register amount, LiftoffRegList pinned) {
  liftoff::Emit64BitShiftOperation(this, dst, src, amount,
                                   &TurboAssembler::ShrPair, pinned);
}

void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
  // This is a nop on mips32.
}

void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
  TurboAssembler::Neg_s(dst, src);
}

void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
  TurboAssembler::Neg_d(dst, src);
}

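// For float min/max, the inline fast path branches to the out-of-line code
// for inputs it cannot handle (the NaN case), which the *OutOfLine helpers
// then resolve before falling through to {done}.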
void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  Label ool, done;
  TurboAssembler::Float32Min(dst, lhs, rhs, &ool);
  Branch(&done);

  bind(&ool);
  TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs);
  bind(&done);
}

void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  Label ool, done;
  TurboAssembler::Float32Max(dst, lhs, rhs, &ool);
  Branch(&done);

  bind(&ool);
  TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
  bind(&done);
}

void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  Label ool, done;
  TurboAssembler::Float64Min(dst, lhs, rhs, &ool);
  Branch(&done);

  bind(&ool);
  TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs);
  bind(&done);
}

void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  Label ool, done;
  TurboAssembler::Float64Max(dst, lhs, rhs, &ool);
  Branch(&done);

  bind(&ool);
  TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
  bind(&done);
}

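// Simple float binops and unops expand to single FPU instructions (or their
// TurboAssembler wrappers), e.g. emit_f32_add(dst, lhs, rhs) becomes
// add_s(dst, lhs, rhs).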
#define FP_BINOP(name, instruction)                                          \
  void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
                                     DoubleRegister rhs) {                   \
    instruction(dst, lhs, rhs);                                              \
  }
#define FP_UNOP(name, instruction)                                             \
  void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
    instruction(dst, src);                                                     \
  }

FP_BINOP(f32_add, add_s)
FP_BINOP(f32_sub, sub_s)
FP_BINOP(f32_mul, mul_s)
FP_BINOP(f32_div, div_s)
FP_UNOP(f32_abs, abs_s)
FP_UNOP(f32_ceil, Ceil_s_s)
FP_UNOP(f32_floor, Floor_s_s)
FP_UNOP(f32_trunc, Trunc_s_s)
FP_UNOP(f32_nearest_int, Round_s_s)
FP_UNOP(f32_sqrt, sqrt_s)
FP_BINOP(f64_add, add_d)
FP_BINOP(f64_sub, sub_d)
FP_BINOP(f64_mul, mul_d)
FP_BINOP(f64_div, div_d)
FP_UNOP(f64_abs, abs_d)
FP_UNOP(f64_sqrt, sqrt_d)

#undef FP_BINOP
#undef FP_UNOP

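// f64 rounding is only emitted inline on mips32r2/r6 in FP64 mode, where
// the double-precision rounding instructions exist; returning false makes
// the caller use its generic fallback instead.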
bool LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
      IsFp64Mode()) {
    Ceil_d_d(dst, src);
    return true;
  }
  return false;
}

bool LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
      IsFp64Mode()) {
    Floor_d_d(dst, src);
    return true;
  }
  return false;
}

bool LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
      IsFp64Mode()) {
    Trunc_d_d(dst, src);
    return true;
  }
  return false;
}

bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
                                            DoubleRegister src) {
  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
      IsFp64Mode()) {
    Round_d_d(dst, src);
    return true;
  }
  return false;
}

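// The trapping float-to-int conversions follow a common pattern: truncate,
// convert the integer result back to a float, and trap if the round trip
// does not compare equal to the truncated input (the value was out of range
// or NaN).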
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
                                            LiftoffRegister dst,
                                            LiftoffRegister src, Label* trap) {
  switch (opcode) {
    case kExprI32ConvertI64:
      TurboAssembler::Move(dst.gp(), src.low_gp());
      return true;
    case kExprI32SConvertF32: {
      LiftoffRegister rounded =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
      LiftoffRegister converted_back =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));

      // Real conversion.
      TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
      trunc_w_s(kScratchDoubleReg, rounded.fp());
      mfc1(dst.gp(), kScratchDoubleReg);
      // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
      // because INT32_MIN allows easier out-of-bounds detection.
      TurboAssembler::Addu(kScratchReg, dst.gp(), 1);
      TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
      TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);

      // Check whether the conversion traps.
      mtc1(dst.gp(), kScratchDoubleReg);
      cvt_s_w(converted_back.fp(), kScratchDoubleReg);
      TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
      TurboAssembler::BranchFalseF(trap);
      return true;
    }
    case kExprI32UConvertF32: {
      LiftoffRegister rounded =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
      LiftoffRegister converted_back =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));

      // Real conversion.
      TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
      TurboAssembler::Trunc_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
      // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
      // because 0 allows easier out-of-bounds detection.
      TurboAssembler::Addu(kScratchReg, dst.gp(), 1);
      TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg);

      // Check whether the conversion traps.
      TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp(),
                               kScratchDoubleReg);
      cvt_s_d(converted_back.fp(), converted_back.fp());
      TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
      TurboAssembler::BranchFalseF(trap);
      return true;
    }
    case kExprI32SConvertF64: {
      if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
          IsFp64Mode()) {
        LiftoffRegister rounded =
            GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
        LiftoffRegister converted_back =
            GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));

        // Real conversion.
        TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
        TurboAssembler::Trunc_w_d(kScratchDoubleReg, rounded.fp());
        mfc1(dst.gp(), kScratchDoubleReg);

        // Check whether the conversion traps.
        cvt_d_w(converted_back.fp(), kScratchDoubleReg);
        TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
        TurboAssembler::BranchFalseF(trap);
        return true;
      } else {
        BAILOUT("emit_type_conversion kExprI32SConvertF64");
        return true;
      }
    }
    case kExprI32UConvertF64: {
      if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
          IsFp64Mode()) {
        LiftoffRegister rounded =
            GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
        LiftoffRegister converted_back =
            GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));

        // Real conversion.
        TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
        TurboAssembler::Trunc_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);

        // Check whether the conversion traps.
        TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp(),
                                 kScratchDoubleReg);
        TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
        TurboAssembler::BranchFalseF(trap);
        return true;
      } else {
        BAILOUT("emit_type_conversion kExprI32UConvertF64");
        return true;
      }
    }
    case kExprI32ReinterpretF32:
      mfc1(dst.gp(), src.fp());
      return true;
    case kExprI64SConvertI32:
      TurboAssembler::Move(dst.low_gp(), src.gp());
      TurboAssembler::Move(dst.high_gp(), src.gp());
      sra(dst.high_gp(), dst.high_gp(), 31);
      return true;
    case kExprI64UConvertI32:
      TurboAssembler::Move(dst.low_gp(), src.gp());
      TurboAssembler::Move(dst.high_gp(), zero_reg);
      return true;
    case kExprI64ReinterpretF64:
      mfc1(dst.low_gp(), src.fp());
      TurboAssembler::Mfhc1(dst.high_gp(), src.fp());
      return true;
    case kExprF32SConvertI32: {
      LiftoffRegister scratch =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
      mtc1(src.gp(), scratch.fp());
      cvt_s_w(dst.fp(), scratch.fp());
      return true;
    }
    case kExprF32UConvertI32: {
      LiftoffRegister scratch =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
      TurboAssembler::Cvt_d_uw(dst.fp(), src.gp(), scratch.fp());
      cvt_s_d(dst.fp(), dst.fp());
      return true;
    }
    case kExprF32ConvertF64:
      cvt_s_d(dst.fp(), src.fp());
      return true;
    case kExprF32ReinterpretI32:
      TurboAssembler::FmoveLow(dst.fp(), src.gp());
      return true;
    case kExprF64SConvertI32: {
      LiftoffRegister scratch =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
      mtc1(src.gp(), scratch.fp());
      cvt_d_w(dst.fp(), scratch.fp());
      return true;
    }
    case kExprF64UConvertI32: {
      LiftoffRegister scratch =
          GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
      TurboAssembler::Cvt_d_uw(dst.fp(), src.gp(), scratch.fp());
      return true;
    }
    case kExprF64ConvertF32:
      cvt_d_s(dst.fp(), src.fp());
      return true;
    case kExprF64ReinterpretI64:
      mtc1(src.low_gp(), dst.fp());
      TurboAssembler::Mthc1(src.high_gp(), dst.fp());
      return true;
    default:
      return false;
  }
}

void LiftoffAssembler::emit_jump(Label* label) {
  TurboAssembler::Branch(label);
}

void LiftoffAssembler::emit_jump(Register target) {
  TurboAssembler::Jump(target);
}

void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
                                      ValueType type, Register lhs,
                                      Register rhs) {
  if (rhs != no_reg) {
    TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
  } else {
    TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
  }
}

void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
  sltiu(dst, src, 1);
}

void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
                                         Register lhs, Register rhs) {
  Register tmp = dst;
  if (dst == lhs || dst == rhs) {
    tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
  }
  // Write 1 as the result.
  TurboAssembler::li(tmp, 1);

  // If the negated condition is true, write 0 as the result.
  Condition neg_cond = NegateCondition(cond);
  TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);

  // If tmp != dst, the result is moved into dst.
  TurboAssembler::Move(dst, tmp);
}

void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
  Register tmp =
      GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(src, dst)).gp();
  sltiu(tmp, src.low_gp(), 1);
  sltiu(dst, src.high_gp(), 1);
  and_(dst, dst, tmp);
}

namespace liftoff {
inline Condition cond_make_unsigned(Condition cond) {
  switch (cond) {
    case kSignedLessThan:
      return kUnsignedLessThan;
    case kSignedLessEqual:
      return kUnsignedLessEqual;
    case kSignedGreaterThan:
      return kUnsignedGreaterThan;
    case kSignedGreaterEqual:
      return kUnsignedGreaterEqual;
    default:
      return cond;
  }
}
}  // namespace liftoff

void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
                                         LiftoffRegister lhs,
                                         LiftoffRegister rhs) {
  Label low, cont;

  // For signed i64 comparisons, we still need to use unsigned comparison for
  // the low word (the only bit carrying signedness information is the MSB in
  // the high word).
  Condition unsigned_cond = liftoff::cond_make_unsigned(cond);

  Register tmp = dst;
  if (liftoff::IsRegInRegPair(lhs, dst) || liftoff::IsRegInRegPair(rhs, dst)) {
    tmp =
        GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, lhs, rhs)).gp();
  }

  // Initially write 1 into the tmp register.
  TurboAssembler::li(tmp, 1);

  // If the high words are equal, compare the low words; else compare the
  // high words.
  Branch(&low, eq, lhs.high_gp(), Operand(rhs.high_gp()));

  TurboAssembler::LoadZeroOnCondition(
      tmp, lhs.high_gp(), Operand(rhs.high_gp()), NegateCondition(cond));
  Branch(&cont);

  bind(&low);
  TurboAssembler::LoadZeroOnCondition(tmp, lhs.low_gp(), Operand(rhs.low_gp()),
                                      NegateCondition(unsigned_cond));

  bind(&cont);
  // Move the result to the dst register if needed.
  TurboAssembler::Move(dst, tmp);
}

namespace liftoff {

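// The MIPS FPU compare instructions only provide EQ, OLT and OLE.
// Conditions like "unequal" or "greater than" are therefore encoded as the
// complementary compare plus a negated predicate, which callers honor by
// choosing LoadZeroIfFPUCondition vs. LoadZeroIfNotFPUCondition.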
inline FPUCondition ConditionToConditionCmpFPU(bool& predicate,
                                               Condition condition) {
  switch (condition) {
    case kEqual:
      predicate = true;
      return EQ;
    case kUnequal:
      predicate = false;
      return EQ;
    case kUnsignedLessThan:
      predicate = true;
      return OLT;
    case kUnsignedGreaterEqual:
      predicate = false;
      return OLT;
    case kUnsignedLessEqual:
      predicate = true;
      return OLE;
    case kUnsignedGreaterThan:
      predicate = false;
      return OLE;
    default:
      predicate = true;
      break;
  }
  UNREACHABLE();
}

}  // namespace liftoff

void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  Label not_nan, cont;
  TurboAssembler::CompareIsNanF32(lhs, rhs);
  TurboAssembler::BranchFalseF(&not_nan);
  // If one of the operands is NaN, return 1 for f32.ne, else 0.
  if (cond == ne) {
    TurboAssembler::li(dst, 1);
  } else {
    TurboAssembler::Move(dst, zero_reg);
  }
  TurboAssembler::Branch(&cont);

  bind(&not_nan);

  TurboAssembler::li(dst, 1);
  bool predicate;
  FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
  TurboAssembler::CompareF32(fcond, lhs, rhs);
  if (predicate) {
    TurboAssembler::LoadZeroIfNotFPUCondition(dst);
  } else {
    TurboAssembler::LoadZeroIfFPUCondition(dst);
  }

  bind(&cont);
}

void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  Label not_nan, cont;
  TurboAssembler::CompareIsNanF64(lhs, rhs);
  TurboAssembler::BranchFalseF(&not_nan);
  // If one of the operands is NaN, return 1 for f64.ne, else 0.
  if (cond == ne) {
    TurboAssembler::li(dst, 1);
  } else {
    TurboAssembler::Move(dst, zero_reg);
  }
  TurboAssembler::Branch(&cont);

  bind(&not_nan);

  TurboAssembler::li(dst, 1);
  bool predicate;
  FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
  TurboAssembler::CompareF64(fcond, lhs, rhs);
  if (predicate) {
    TurboAssembler::LoadZeroIfNotFPUCondition(dst);
  } else {
    TurboAssembler::LoadZeroIfFPUCondition(dst);
  }

  bind(&cont);
}

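// The current stack limit is loaded from the address in {limit_address};
// we jump to the out-of-line code when sp is at or below it.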
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
  TurboAssembler::Ulw(limit_address, MemOperand(limit_address));
  TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
}

void LiftoffAssembler::CallTrapCallbackForTesting() {
  PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
  CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}

void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
  if (emit_debug_code()) Abort(reason);
}

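// PushRegisters stores the first gp register at the highest offset and the
// last at offset 0; PopRegisters walks the set in the reverse order
// (GetLastRegSet), so the two stay exact mirrors of each other.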
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
  LiftoffRegList gp_regs = regs & kGpCacheRegList;
  unsigned num_gp_regs = gp_regs.GetNumRegsSet();
  if (num_gp_regs) {
    unsigned offset = num_gp_regs * kPointerSize;
    addiu(sp, sp, -offset);
    while (!gp_regs.is_empty()) {
      LiftoffRegister reg = gp_regs.GetFirstRegSet();
      offset -= kPointerSize;
      sw(reg.gp(), MemOperand(sp, offset));
      gp_regs.clear(reg);
    }
    DCHECK_EQ(offset, 0);
  }
  LiftoffRegList fp_regs = regs & kFpCacheRegList;
  unsigned num_fp_regs = fp_regs.GetNumRegsSet();
  if (num_fp_regs) {
    addiu(sp, sp, -(num_fp_regs * kStackSlotSize));
    unsigned offset = 0;
    while (!fp_regs.is_empty()) {
      LiftoffRegister reg = fp_regs.GetFirstRegSet();
      TurboAssembler::Sdc1(reg.fp(), MemOperand(sp, offset));
      fp_regs.clear(reg);
      offset += sizeof(double);
    }
    DCHECK_EQ(offset, num_fp_regs * sizeof(double));
  }
}

void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
  LiftoffRegList fp_regs = regs & kFpCacheRegList;
  unsigned fp_offset = 0;
  while (!fp_regs.is_empty()) {
    LiftoffRegister reg = fp_regs.GetFirstRegSet();
    TurboAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset));
    fp_regs.clear(reg);
    fp_offset += sizeof(double);
  }
  if (fp_offset) addiu(sp, sp, fp_offset);
  LiftoffRegList gp_regs = regs & kGpCacheRegList;
  unsigned gp_offset = 0;
  while (!gp_regs.is_empty()) {
    LiftoffRegister reg = gp_regs.GetLastRegSet();
    lw(reg.gp(), MemOperand(sp, gp_offset));
    gp_regs.clear(reg);
    gp_offset += kPointerSize;
  }
  addiu(sp, sp, gp_offset);
}

void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
  DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize);  // 16 bit immediate
  TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}

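// CallC spills all arguments into a freshly allocated stack buffer, passes
// a pointer to that buffer as the single C argument (in a0), and afterwards
// reads results from the return register (v0) and/or from the same buffer.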
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
                             const LiftoffRegister* args,
                             const LiftoffRegister* rets,
                             ValueType out_argument_type, int stack_bytes,
                             ExternalReference ext_ref) {
  addiu(sp, sp, -stack_bytes);

  int arg_bytes = 0;
  for (ValueType param_type : sig->parameters()) {
    liftoff::Store(this, sp, arg_bytes, *args++, param_type);
    arg_bytes += ValueTypes::MemSize(param_type);
  }
  DCHECK_LE(arg_bytes, stack_bytes);

  // Pass a pointer to the buffer with the arguments to the C function.
  // On mips, the first argument is passed in {a0}.
  constexpr Register kFirstArgReg = a0;
  mov(kFirstArgReg, sp);

  // Now call the C function.
  constexpr int kNumCCallArgs = 1;
  PrepareCallCFunction(kNumCCallArgs, kScratchReg);
  CallCFunction(ext_ref, kNumCCallArgs);

  // Move return value to the right register.
  const LiftoffRegister* next_result_reg = rets;
  if (sig->return_count() > 0) {
    DCHECK_EQ(1, sig->return_count());
    constexpr Register kReturnReg = v0;
    if (kReturnReg != next_result_reg->gp()) {
      Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
    }
    ++next_result_reg;
  }

  // Load potential output value from the buffer on the stack.
  if (out_argument_type != kWasmStmt) {
    liftoff::Load(this, *next_result_reg, sp, 0, out_argument_type);
  }

  addiu(sp, sp, stack_bytes);
}

void LiftoffAssembler::CallNativeWasmCode(Address addr) {
  Call(addr, RelocInfo::WASM_CALL);
}

void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                    compiler::CallDescriptor* call_descriptor,
                                    Register target) {
  if (target == no_reg) {
    pop(kScratchReg);
    Call(kScratchReg);
  } else {
    Call(target);
  }
}

void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
  // A direct call to a wasm runtime stub defined in this module.
  // Just encode the stub index. This will be patched at relocation.
  Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
}

void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
  addiu(sp, sp, -size);
  TurboAssembler::Move(addr, sp);
}

void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
  addiu(sp, sp, size);
}

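// Pushes the arguments for a call onto the machine stack. Each VarState is
// materialized from wherever it currently lives (stack slot, register, or
// i32 constant); i64 and f64 values are pushed as two word-sized halves.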
void LiftoffStackSlots::Construct() {
  for (auto& slot : slots_) {
    const LiftoffAssembler::VarState& src = slot.src_;
    switch (src.loc()) {
      case LiftoffAssembler::VarState::kStack: {
        if (src.type() == kWasmF64) {
          DCHECK_EQ(kLowWord, slot.half_);
          asm_->lw(kScratchReg,
                   liftoff::GetHalfStackSlot(2 * slot.src_index_ - 1));
          asm_->push(kScratchReg);
        }
        asm_->lw(kScratchReg,
                 liftoff::GetHalfStackSlot(2 * slot.src_index_ +
                                           (slot.half_ == kLowWord ? 0 : 1)));
        asm_->push(kScratchReg);
        break;
      }
      case LiftoffAssembler::VarState::kRegister:
        if (src.type() == kWasmI64) {
          liftoff::push(
              asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
              kWasmI32);
        } else {
          liftoff::push(asm_, src.reg(), src.type());
        }
        break;
      case LiftoffAssembler::VarState::KIntConst: {
        // The high word is the sign extension of the low word.
        asm_->li(kScratchReg,
                 Operand(slot.half_ == kLowWord ? src.i32_const()
                                                : src.i32_const() >> 31));
        asm_->push(kScratchReg);
        break;
      }
    }
  }
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8

#undef BAILOUT

#endif  // V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_