1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_H_
6 #define V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_H_
7 
8 #include "src/base/platform/wrappers.h"
9 #include "src/codegen/assembler.h"
10 #include "src/heap/memory-chunk.h"
11 #include "src/wasm/baseline/liftoff-assembler.h"
12 #include "src/wasm/simd-shuffle.h"
13 #include "src/wasm/wasm-objects.h"
14 
15 namespace v8 {
16 namespace internal {
17 namespace wasm {
18 
19 namespace liftoff {
20 
21 inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
22   switch (liftoff_cond) {
23     case kEqual:
24       return eq;
25     case kUnequal:
26       return ne;
27     case kSignedLessThan:
28     case kUnsignedLessThan:
29       return lt;
30     case kSignedLessEqual:
31     case kUnsignedLessEqual:
32       return le;
33     case kSignedGreaterEqual:
34     case kUnsignedGreaterEqual:
35       return ge;
36     case kSignedGreaterThan:
37     case kUnsignedGreaterThan:
38       return gt;
39   }
40 }
41 
42 inline constexpr bool UseSignedOp(LiftoffCondition liftoff_cond) {
43   switch (liftoff_cond) {
44     case kEqual:
45     case kUnequal:
46     case kSignedLessThan:
47     case kSignedLessEqual:
48     case kSignedGreaterThan:
49     case kSignedGreaterEqual:
50       return true;
51     case kUnsignedLessThan:
52     case kUnsignedLessEqual:
53     case kUnsignedGreaterThan:
54     case kUnsignedGreaterEqual:
55       return false;
56     default:
57       UNREACHABLE();
58   }
59   return false;
60 }
61 
62 //  half
63 //  slot        Frame
64 //  -----+--------------------+---------------------------
65 //  n+3  |   parameter n      |
66 //  ...  |       ...          |
67 //   4   |   parameter 1      | or parameter 2
68 //   3   |   parameter 0      | or parameter 1
69 //   2   |  (result address)  | or parameter 0
70 //  -----+--------------------+---------------------------
71 //   1   | return addr (lr)   |
72 //   0   | previous frame (fp)|
73 //  -----+--------------------+  <-- frame ptr (fp)
74 //  -1   | StackFrame::WASM   |
75 //  -2   |    instance        |
76 //  -3   |    feedback vector |
77 //  -4   |    tiering budget  |
78 //  -----+--------------------+---------------------------
79 //  -5   |    slot 0 (high)   |   ^
80 //  -6   |    slot 0 (low)    |   |
81 //  -7   |    slot 1 (high)   | Frame slots
82 //  -8   |    slot 1 (low)    |   |
83 //       |                    |   v
84 //  -----+--------------------+  <-- stack ptr (sp)
85 //
86 constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
87 constexpr int kFeedbackVectorOffset = 3 * kSystemPointerSize;
88 constexpr int kTierupBudgetOffset = 4 * kSystemPointerSize;
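// Stack slot offsets are positive and addressed downward from the frame
// pointer (see the frame layout above).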
89 inline MemOperand GetStackSlot(uint32_t offset) {
90   return MemOperand(fp, -offset);
91 }
92 
93 inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
94 
95 
96 }  // namespace liftoff
97 
98 int LiftoffAssembler::PrepareStackFrame() {
99   int offset = pc_offset();
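  // Emit a 6-byte "lay sp, 0(sp)" placeholder; PatchPrepareStackFrame later
  // overwrites it with the real frame allocation (or a branch to the
  // large-frame path).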
100   lay(sp, MemOperand(sp));
101   return offset;
102 }
103 
104 void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
105                                        int stack_param_delta) {
106   Register scratch = r1;
107   // Push the return address and frame pointer to complete the stack frame.
108   lay(sp, MemOperand(sp, -2 * kSystemPointerSize));
109   LoadU64(scratch, MemOperand(fp, kSystemPointerSize));
110   StoreU64(scratch, MemOperand(sp, kSystemPointerSize));
111   LoadU64(scratch, MemOperand(fp));
112   StoreU64(scratch, MemOperand(sp));
113 
114   // Shift the whole frame upwards.
115   int slot_count = num_callee_stack_params + 2;
116   for (int i = slot_count - 1; i >= 0; --i) {
117     LoadU64(scratch, MemOperand(sp, i * kSystemPointerSize));
118     StoreU64(scratch,
119              MemOperand(fp, (i - stack_param_delta) * kSystemPointerSize));
120   }
121 
122   // Set the new stack and frame pointer.
123   lay(sp, MemOperand(fp, -stack_param_delta * kSystemPointerSize));
124   Pop(r14, fp);
125 }
126 
127 void LiftoffAssembler::AlignFrameSize() {}
128 
129 void LiftoffAssembler::PatchPrepareStackFrame(
130     int offset, SafepointTableBuilder* safepoint_table_builder) {
131   int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
132 
133   constexpr int LayInstrSize = 6;
134 
135   Assembler patching_assembler(
136       AssemblerOptions{},
137       ExternalAssemblerBuffer(buffer_start_ + offset, LayInstrSize + kGap));
138   if (V8_LIKELY(frame_size < 4 * KB)) {
139     patching_assembler.lay(sp, MemOperand(sp, -frame_size));
140     return;
141   }
142 
143   // The frame size is bigger than 4KB, so we might overflow the available stack
144   // space if we first allocate the frame and then do the stack check (we will
145   // need some remaining stack space for throwing the exception). That's why we
146   // check the available stack space before we allocate the frame. To do this we
147   // replace the {__ sub(sp, sp, framesize)} with a jump to OOL code that does
148   // this "extended stack check".
149   //
150   // The OOL code can simply be generated here with the normal assembler,
151   // because all other code generation, including OOL code, has already finished
152   // when {PatchPrepareStackFrame} is called. The function prologue then jumps
153   // to the current {pc_offset()} to execute the OOL code for allocating the
154   // large frame.
155 
156   // Emit the unconditional branch in the function prologue (from {offset} to
157   // {pc_offset()}).
158 
159   int jump_offset = pc_offset() - offset;
160   patching_assembler.branchOnCond(al, jump_offset, true, true);
161 
162   // If the frame is bigger than the stack, we throw the stack overflow
163   // exception unconditionally. Thereby we can avoid the integer overflow
164   // check in the condition code.
165   RecordComment("OOL: stack check for large frame");
166   Label continuation;
167   if (frame_size < FLAG_stack_size * 1024) {
168     Register stack_limit = ip;
169     LoadU64(stack_limit,
170             FieldMemOperand(kWasmInstanceRegister,
171                             WasmInstanceObject::kRealStackLimitAddressOffset),
172             r0);
173     LoadU64(stack_limit, MemOperand(stack_limit), r0);
174     AddU64(stack_limit, Operand(frame_size));
175     CmpU64(sp, stack_limit);
176     bge(&continuation);
177   }
178 
179   Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
180   // The call will not return; just define an empty safepoint.
181   safepoint_table_builder->DefineSafepoint(this);
182   if (FLAG_debug_code) stop();
183 
184   bind(&continuation);
185 
186   // Now allocate the stack space. Note that this might do more than just
187   // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
188   lay(sp, MemOperand(sp, -frame_size));
189 
190   // Jump back to the start of the function, from {pc_offset()} to
191   // right after the reserved space for the {__ sub(sp, sp, framesize)} (which
192   // is a branch now).
193   jump_offset = offset - pc_offset() + 6;
194   branchOnCond(al, jump_offset, true);
195 }
196 
197 void LiftoffAssembler::FinishCode() {}
198 
199 void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
200 
201 // static
202 constexpr int LiftoffAssembler::StaticStackFrameSize() {
203   return liftoff::kTierupBudgetOffset;
204 }
205 
206 int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
207   switch (kind) {
208     case kS128:
209       return value_kind_size(kind);
210     default:
211       return kStackSlotSize;
212   }
213 }
214 
215 bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
216   return (kind == kS128 || is_reference(kind));
217 }
218 
219 void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
220                                     RelocInfo::Mode rmode) {
221   switch (value.type().kind()) {
222     case kI32:
223       mov(reg.gp(), Operand(value.to_i32(), rmode));
224       break;
225     case kI64:
226       mov(reg.gp(), Operand(value.to_i64(), rmode));
227       break;
228     case kF32: {
229       UseScratchRegisterScope temps(this);
230       Register scratch = temps.Acquire();
231       LoadF32(reg.fp(), value.to_f32_boxed().get_scalar(), scratch);
232       break;
233     }
234     case kF64: {
235       UseScratchRegisterScope temps(this);
236       Register scratch = temps.Acquire();
237       LoadF64(reg.fp(), value.to_f64_boxed().get_bits(), scratch);
238       break;
239     }
240     default:
241       UNREACHABLE();
242   }
243 }
244 
245 void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
246   LoadU64(dst, liftoff::GetInstanceOperand());
247 }
248 
249 void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
250                                         int offset, int size) {
251   DCHECK_LE(0, offset);
252   switch (size) {
253     case 1:
254       LoadU8(dst, MemOperand(instance, offset));
255       break;
256     case 4:
257       LoadU32(dst, MemOperand(instance, offset));
258       break;
259     case 8:
260       LoadU64(dst, MemOperand(instance, offset));
261       break;
262     default:
263       UNIMPLEMENTED();
264   }
265 }
266 
267 void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
268                                                      Register instance,
269                                                      int offset) {
270   DCHECK_LE(0, offset);
271   LoadTaggedPointerField(dst, MemOperand(instance, offset));
272 }
273 
274 void LiftoffAssembler::SpillInstance(Register instance) {
275   StoreU64(instance, liftoff::GetInstanceOperand());
276 }
277 
278 void LiftoffAssembler::ResetOSRTarget() {}
279 
280 void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
281                                          Register offset_reg,
282                                          int32_t offset_imm,
283                                          LiftoffRegList pinned) {
284   CHECK(is_int20(offset_imm));
285   LoadTaggedPointerField(
286       dst,
287       MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
288 }
289 
290 void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
291                                        int32_t offset_imm) {
292   UseScratchRegisterScope temps(this);
293   LoadU64(dst, MemOperand(src_addr, offset_imm), r1);
294 }
295 
296 void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
297                                           Register offset_reg,
298                                           int32_t offset_imm,
299                                           LiftoffRegister src,
300                                           LiftoffRegList pinned,
301                                           SkipWriteBarrier skip_write_barrier) {
302   MemOperand dst_op =
303       MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
304   StoreTaggedField(src.gp(), dst_op);
305 
306   if (skip_write_barrier || FLAG_disable_write_barriers) return;
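  // Write barrier: the record-write stub is only called when the destination
  // page tracks outgoing pointers, the stored value is not a Smi, and the
  // value's page tracks incoming pointers.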
307 
308   Label write_barrier;
309   Label exit;
310   CheckPageFlag(dst_addr, r1, MemoryChunk::kPointersFromHereAreInterestingMask,
311                 ne, &write_barrier);
312   b(&exit);
313   bind(&write_barrier);
314   JumpIfSmi(src.gp(), &exit);
315   if (COMPRESS_POINTERS_BOOL) {
316     DecompressTaggedPointer(src.gp(), src.gp());
317   }
318   CheckPageFlag(src.gp(), r1, MemoryChunk::kPointersToHereAreInterestingMask,
319                 eq, &exit);
320   lay(r1, dst_op);
321   CallRecordWriteStubSaveRegisters(dst_addr, r1, RememberedSetAction::kEmit,
322                                    SaveFPRegsMode::kSave,
323                                    StubCallMode::kCallWasmRuntimeStub);
324   bind(&exit);
325 }
326 
327 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
328                             Register offset_reg, uintptr_t offset_imm,
329                             LoadType type, LiftoffRegList pinned,
330                             uint32_t* protected_load_pc, bool is_load_mem,
331                             bool i64_offset) {
332   UseScratchRegisterScope temps(this);
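  // Memory operands only encode a signed 20-bit displacement, so larger
  // immediate offsets are first materialized in the ip scratch register.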
333   if (!is_int20(offset_imm)) {
334     mov(ip, Operand(offset_imm));
335     if (offset_reg != no_reg) {
336       if (!i64_offset) {
337         // Clear the upper 32 bits of the 64 bit offset register.
338         llgfr(r0, offset_reg);
339         offset_reg = r0;
340       }
341       AddS64(ip, offset_reg);
342     }
343     offset_reg = ip;
344     offset_imm = 0;
345   }
346   MemOperand src_op =
347       MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
348   if (protected_load_pc) *protected_load_pc = pc_offset();
349   switch (type.value()) {
350     case LoadType::kI32Load8U:
351     case LoadType::kI64Load8U:
352       LoadU8(dst.gp(), src_op);
353       break;
354     case LoadType::kI32Load8S:
355     case LoadType::kI64Load8S:
356       LoadS8(dst.gp(), src_op);
357       break;
358     case LoadType::kI32Load16U:
359     case LoadType::kI64Load16U:
360       if (is_load_mem) {
361         LoadU16LE(dst.gp(), src_op);
362       } else {
363         LoadU16(dst.gp(), src_op);
364       }
365       break;
366     case LoadType::kI32Load16S:
367     case LoadType::kI64Load16S:
368       if (is_load_mem) {
369         LoadS16LE(dst.gp(), src_op);
370       } else {
371         LoadS16(dst.gp(), src_op);
372       }
373       break;
374     case LoadType::kI64Load32U:
375       if (is_load_mem) {
376         LoadU32LE(dst.gp(), src_op);
377       } else {
378         LoadU32(dst.gp(), src_op);
379       }
380       break;
381     case LoadType::kI32Load:
382     case LoadType::kI64Load32S:
383       if (is_load_mem) {
384         LoadS32LE(dst.gp(), src_op);
385       } else {
386         LoadS32(dst.gp(), src_op);
387       }
388       break;
389     case LoadType::kI64Load:
390       if (is_load_mem) {
391         LoadU64LE(dst.gp(), src_op);
392       } else {
393         LoadU64(dst.gp(), src_op);
394       }
395       break;
396     case LoadType::kF32Load:
397       if (is_load_mem) {
398         LoadF32LE(dst.fp(), src_op, r0);
399       } else {
400         LoadF32(dst.fp(), src_op);
401       }
402       break;
403     case LoadType::kF64Load:
404       if (is_load_mem) {
405         LoadF64LE(dst.fp(), src_op, r0);
406       } else {
407         LoadF64(dst.fp(), src_op);
408       }
409       break;
410     case LoadType::kS128Load:
411       if (is_load_mem) {
412         LoadV128LE(dst.fp(), src_op, r1, r0);
413       } else {
414         LoadV128(dst.fp(), src_op, r1);
415       }
416       break;
417     default:
418       UNREACHABLE();
419   }
420 }
421 
422 void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
423                              uintptr_t offset_imm, LiftoffRegister src,
424                              StoreType type, LiftoffRegList pinned,
425                              uint32_t* protected_store_pc, bool is_store_mem) {
426   if (!is_int20(offset_imm)) {
427     mov(ip, Operand(offset_imm));
428     if (offset_reg != no_reg) {
429       AddS64(ip, offset_reg);
430     }
431     offset_reg = ip;
432     offset_imm = 0;
433   }
434   MemOperand dst_op =
435       MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
436   if (protected_store_pc) *protected_store_pc = pc_offset();
437   switch (type.value()) {
438     case StoreType::kI32Store8:
439     case StoreType::kI64Store8:
440       StoreU8(src.gp(), dst_op);
441       break;
442     case StoreType::kI32Store16:
443     case StoreType::kI64Store16:
444       if (is_store_mem) {
445         StoreU16LE(src.gp(), dst_op, r1);
446       } else {
447         StoreU16(src.gp(), dst_op, r1);
448       }
449       break;
450     case StoreType::kI32Store:
451     case StoreType::kI64Store32:
452       if (is_store_mem) {
453         StoreU32LE(src.gp(), dst_op, r1);
454       } else {
455         StoreU32(src.gp(), dst_op, r1);
456       }
457       break;
458     case StoreType::kI64Store:
459       if (is_store_mem) {
460         StoreU64LE(src.gp(), dst_op, r1);
461       } else {
462         StoreU64(src.gp(), dst_op, r1);
463       }
464       break;
465     case StoreType::kF32Store:
466       if (is_store_mem) {
467         StoreF32LE(src.fp(), dst_op, r1);
468       } else {
469         StoreF32(src.fp(), dst_op);
470       }
471       break;
472     case StoreType::kF64Store:
473       if (is_store_mem) {
474         StoreF64LE(src.fp(), dst_op, r1);
475       } else {
476         StoreF64(src.fp(), dst_op);
477       }
478       break;
479     case StoreType::kS128Store: {
480       if (is_store_mem) {
481         StoreV128LE(src.fp(), dst_op, r1, r0);
482       } else {
483         StoreV128(src.fp(), dst_op, r1);
484       }
485       break;
486     }
487     default:
488       UNREACHABLE();
489   }
490 }
491 
492 void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
493                                   Register offset_reg, uintptr_t offset_imm,
494                                   LoadType type, LiftoffRegList pinned) {
495   Load(dst, src_addr, offset_reg, offset_imm, type, pinned, nullptr, true);
496 }
497 
498 void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
499                                    uintptr_t offset_imm, LiftoffRegister src,
500                                    StoreType type, LiftoffRegList pinned) {
501   if (!is_int20(offset_imm)) {
502     mov(ip, Operand(offset_imm));
503     if (offset_reg != no_reg) {
504       AddS64(ip, offset_reg);
505     }
506     offset_reg = ip;
507     offset_imm = 0;
508   }
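  // Compute the effective address once in ip; the sequences below address
  // memory through MemOperand(ip).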
509   lay(ip,
510       MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
511 
512   switch (type.value()) {
513     case StoreType::kI32Store8:
514     case StoreType::kI64Store8: {
515       AtomicExchangeU8(ip, src.gp(), r1, r0);
516       break;
517     }
518     case StoreType::kI32Store16:
519     case StoreType::kI64Store16: {
520 #ifdef V8_TARGET_BIG_ENDIAN
521       lrvr(r1, src.gp());
522       ShiftRightU32(r1, r1, Operand(16));
523 #else
524       LoadU16(r1, src.gp());
525 #endif
526       Push(r2);
527       AtomicExchangeU16(ip, r1, r2, r0);
528       Pop(r2);
529       break;
530     }
531     case StoreType::kI32Store:
532     case StoreType::kI64Store32: {
533 #ifdef V8_TARGET_BIG_ENDIAN
534       lrvr(r1, src.gp());
535 #else
536       LoadU32(r1, src.gp());
537 #endif
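      // Plain store implemented as a compare-and-swap loop: cs() retries
      // until the value in r1 is written atomically.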
538       Label do_cs;
539       bind(&do_cs);
540       cs(r0, r1, MemOperand(ip));
541       bne(&do_cs, Label::kNear);
542       break;
543     }
544     case StoreType::kI64Store: {
545 #ifdef V8_TARGET_BIG_ENDIAN
546       lrvgr(r1, src.gp());
547 #else
548       mov(r1, src.gp());
549 #endif
550       Label do_cs;
551       bind(&do_cs);
552       csg(r0, r1, MemOperand(ip));
553       bne(&do_cs, Label::kNear);
554       break;
555     }
556     default:
557       UNREACHABLE();
558   }
559 }
560 
561 void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
562                                  uintptr_t offset_imm, LiftoffRegister value,
563                                  LiftoffRegister result, StoreType type) {
564   Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
565                                                            value, result})
566                       .gp();
567   Register tmp2 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
568                                                            value, result, tmp1})
569                       .gp();
570 
571   if (!is_int20(offset_imm)) {
572     mov(ip, Operand(offset_imm));
573     if (offset_reg != no_reg) {
574       AddS64(ip, offset_reg);
575     }
576     offset_reg = ip;
577     offset_imm = 0;
578   }
579   lay(ip,
580       MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
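  // Each case below is a read-modify-write loop: load the old value, compute
  // the sum (byte-reversing on big-endian targets), and retry the
  // compare-and-swap until it succeeds (Condition(4) branches on failure).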
581 
582   switch (type.value()) {
583     case StoreType::kI32Store8:
584     case StoreType::kI64Store8: {
585       Label doadd;
586       bind(&doadd);
587       LoadU8(tmp1, MemOperand(ip));
588       AddS32(tmp2, tmp1, value.gp());
589       AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
590       b(Condition(4), &doadd);
591       LoadU8(result.gp(), result.gp());
592       break;
593     }
594     case StoreType::kI32Store16:
595     case StoreType::kI64Store16: {
596       Label doadd;
597       bind(&doadd);
598       LoadU16(tmp1, MemOperand(ip));
599 #ifdef V8_TARGET_BIG_ENDIAN
600       lrvr(tmp2, tmp1);
601       ShiftRightU32(tmp2, tmp2, Operand(16));
602       AddS32(tmp2, tmp2, value.gp());
603       lrvr(tmp2, tmp2);
604       ShiftRightU32(tmp2, tmp2, Operand(16));
605 #else
606       AddS32(tmp2, tmp1, value.gp());
607 #endif
608       AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
609       b(Condition(4), &doadd);
610       LoadU16(result.gp(), result.gp());
611 #ifdef V8_TARGET_BIG_ENDIAN
612       lrvr(result.gp(), result.gp());
613       ShiftRightU32(result.gp(), result.gp(), Operand(16));
614 #endif
615       break;
616     }
617     case StoreType::kI32Store:
618     case StoreType::kI64Store32: {
619       Label doadd;
620       bind(&doadd);
621       LoadU32(tmp1, MemOperand(ip));
622 #ifdef V8_TARGET_BIG_ENDIAN
623       lrvr(tmp2, tmp1);
624       AddS32(tmp2, tmp2, value.gp());
625       lrvr(tmp2, tmp2);
626 #else
627       AddS32(tmp2, tmp1, value.gp());
628 #endif
629       CmpAndSwap(tmp1, tmp2, MemOperand(ip));
630       b(Condition(4), &doadd);
631       LoadU32(result.gp(), tmp1);
632 #ifdef V8_TARGET_BIG_ENDIAN
633       lrvr(result.gp(), result.gp());
634 #endif
635       break;
636     }
637     case StoreType::kI64Store: {
638       Label doadd;
639       bind(&doadd);
640       LoadU64(tmp1, MemOperand(ip));
641 #ifdef V8_TARGET_BIG_ENDIAN
642       lrvgr(tmp2, tmp1);
643       AddS64(tmp2, tmp2, value.gp());
644       lrvgr(tmp2, tmp2);
645 #else
646       AddS64(tmp2, tmp1, value.gp());
647 #endif
648       CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
649       b(Condition(4), &doadd);
650       mov(result.gp(), tmp1);
651 #ifdef V8_TARGET_BIG_ENDIAN
652       lrvgr(result.gp(), result.gp());
653 #endif
654       break;
655     }
656     default:
657       UNREACHABLE();
658   }
659 }
660 
661 void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
662                                  uintptr_t offset_imm, LiftoffRegister value,
663                                  LiftoffRegister result, StoreType type) {
664   Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
665                                                            value, result})
666                       .gp();
667   Register tmp2 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
668                                                            value, result, tmp1})
669                       .gp();
670 
671   if (!is_int20(offset_imm)) {
672     mov(ip, Operand(offset_imm));
673     if (offset_reg != no_reg) {
674       AddS64(ip, offset_reg);
675     }
676     offset_reg = ip;
677     offset_imm = 0;
678   }
679   lay(ip,
680       MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
681 
682   switch (type.value()) {
683     case StoreType::kI32Store8:
684     case StoreType::kI64Store8: {
685       Label do_again;
686       bind(&do_again);
687       LoadU8(tmp1, MemOperand(ip));
688       SubS32(tmp2, tmp1, value.gp());
689       AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
690       b(Condition(4), &do_again);
691       LoadU8(result.gp(), result.gp());
692       break;
693     }
694     case StoreType::kI32Store16:
695     case StoreType::kI64Store16: {
696       Label do_again;
697       bind(&do_again);
698       LoadU16(tmp1, MemOperand(ip));
699 #ifdef V8_TARGET_BIG_ENDIAN
700       lrvr(tmp2, tmp1);
701       ShiftRightU32(tmp2, tmp2, Operand(16));
702       SubS32(tmp2, tmp2, value.gp());
703       lrvr(tmp2, tmp2);
704       ShiftRightU32(tmp2, tmp2, Operand(16));
705 #else
706       SubS32(tmp2, tmp1, value.gp());
707 #endif
708       AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
709       b(Condition(4), &do_again);
710       LoadU16(result.gp(), result.gp());
711 #ifdef V8_TARGET_BIG_ENDIAN
712       lrvr(result.gp(), result.gp());
713       ShiftRightU32(result.gp(), result.gp(), Operand(16));
714 #endif
715       break;
716     }
717     case StoreType::kI32Store:
718     case StoreType::kI64Store32: {
719       Label do_again;
720       bind(&do_again);
721       LoadU32(tmp1, MemOperand(ip));
722 #ifdef V8_TARGET_BIG_ENDIAN
723       lrvr(tmp2, tmp1);
724       SubS32(tmp2, tmp2, value.gp());
725       lrvr(tmp2, tmp2);
726 #else
727       SubS32(tmp2, tmp1, value.gp());
728 #endif
729       CmpAndSwap(tmp1, tmp2, MemOperand(ip));
730       b(Condition(4), &do_again);
731       LoadU32(result.gp(), tmp1);
732 #ifdef V8_TARGET_BIG_ENDIAN
733       lrvr(result.gp(), result.gp());
734 #endif
735       break;
736     }
737     case StoreType::kI64Store: {
738       Label do_again;
739       bind(&do_again);
740       LoadU64(tmp1, MemOperand(ip));
741 #ifdef V8_TARGET_BIG_ENDIAN
742       lrvgr(tmp2, tmp1);
743       SubS64(tmp2, tmp2, value.gp());
744       lrvgr(tmp2, tmp2);
745 #else
746       SubS64(tmp2, tmp1, value.gp());
747 #endif
748       CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
749       b(Condition(4), &do_again);
750       mov(result.gp(), tmp1);
751 #ifdef V8_TARGET_BIG_ENDIAN
752       lrvgr(result.gp(), result.gp());
753 #endif
754       break;
755     }
756     default:
757       UNREACHABLE();
758   }
759 }
760 
761 void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
762                                  uintptr_t offset_imm, LiftoffRegister value,
763                                  LiftoffRegister result, StoreType type) {
764   Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
765                                                            value, result})
766                       .gp();
767   Register tmp2 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
768                                                            value, result, tmp1})
769                       .gp();
770 
771   if (!is_int20(offset_imm)) {
772     mov(ip, Operand(offset_imm));
773     if (offset_reg != no_reg) {
774       AddS64(ip, offset_reg);
775     }
776     offset_reg = ip;
777     offset_imm = 0;
778   }
779   lay(ip,
780       MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
781 
782   switch (type.value()) {
783     case StoreType::kI32Store8:
784     case StoreType::kI64Store8: {
785       Label do_again;
786       bind(&do_again);
787       LoadU8(tmp1, MemOperand(ip));
788       AndP(tmp2, tmp1, value.gp());
789       AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
790       b(Condition(4), &do_again);
791       LoadU8(result.gp(), result.gp());
792       break;
793     }
794     case StoreType::kI32Store16:
795     case StoreType::kI64Store16: {
796       Label do_again;
797       bind(&do_again);
798       LoadU16(tmp1, MemOperand(ip));
799 #ifdef V8_TARGET_BIG_ENDIAN
800       lrvr(tmp2, tmp1);
801       ShiftRightU32(tmp2, tmp2, Operand(16));
802       AndP(tmp2, tmp2, value.gp());
803       lrvr(tmp2, tmp2);
804       ShiftRightU32(tmp2, tmp2, Operand(16));
805 #else
806       AndP(tmp2, tmp1, value.gp());
807 #endif
808       AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
809       b(Condition(4), &do_again);
810       LoadU16(result.gp(), result.gp());
811 #ifdef V8_TARGET_BIG_ENDIAN
812       lrvr(result.gp(), result.gp());
813       ShiftRightU32(result.gp(), result.gp(), Operand(16));
814 #endif
815       break;
816     }
817     case StoreType::kI32Store:
818     case StoreType::kI64Store32: {
819       Label do_again;
820       bind(&do_again);
821       LoadU32(tmp1, MemOperand(ip));
822 #ifdef V8_TARGET_BIG_ENDIAN
823       lrvr(tmp2, tmp1);
824       AndP(tmp2, tmp2, value.gp());
825       lrvr(tmp2, tmp2);
826 #else
827       AndP(tmp2, tmp1, value.gp());
828 #endif
829       CmpAndSwap(tmp1, tmp2, MemOperand(ip));
830       b(Condition(4), &do_again);
831       LoadU32(result.gp(), tmp1);
832 #ifdef V8_TARGET_BIG_ENDIAN
833       lrvr(result.gp(), result.gp());
834 #endif
835       break;
836     }
837     case StoreType::kI64Store: {
838       Label do_again;
839       bind(&do_again);
840       LoadU64(tmp1, MemOperand(ip));
841 #ifdef V8_TARGET_BIG_ENDIAN
842       lrvgr(tmp2, tmp1);
843       AndP(tmp2, tmp2, value.gp());
844       lrvgr(tmp2, tmp2);
845 #else
846       AndP(tmp2, tmp1, value.gp());
847 #endif
848       CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
849       b(Condition(4), &do_again);
850       mov(result.gp(), tmp1);
851 #ifdef V8_TARGET_BIG_ENDIAN
852       lrvgr(result.gp(), result.gp());
853 #endif
854       break;
855     }
856     default:
857       UNREACHABLE();
858   }
859 }
860 
861 void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
862                                 uintptr_t offset_imm, LiftoffRegister value,
863                                 LiftoffRegister result, StoreType type) {
864   Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
865                                                            value, result})
866                       .gp();
867   Register tmp2 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
868                                                            value, result, tmp1})
869                       .gp();
870 
871   if (!is_int20(offset_imm)) {
872     mov(ip, Operand(offset_imm));
873     if (offset_reg != no_reg) {
874       AddS64(ip, offset_reg);
875     }
876     offset_reg = ip;
877     offset_imm = 0;
878   }
879   lay(ip,
880       MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
881 
882   switch (type.value()) {
883     case StoreType::kI32Store8:
884     case StoreType::kI64Store8: {
885       Label do_again;
886       bind(&do_again);
887       LoadU8(tmp1, MemOperand(ip));
888       OrP(tmp2, tmp1, value.gp());
889       AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
890       b(Condition(4), &do_again);
891       LoadU8(result.gp(), result.gp());
892       break;
893     }
894     case StoreType::kI32Store16:
895     case StoreType::kI64Store16: {
896       Label do_again;
897       bind(&do_again);
898       LoadU16(tmp1, MemOperand(ip));
899 #ifdef V8_TARGET_BIG_ENDIAN
900       lrvr(tmp2, tmp1);
901       ShiftRightU32(tmp2, tmp2, Operand(16));
902       OrP(tmp2, tmp2, value.gp());
903       lrvr(tmp2, tmp2);
904       ShiftRightU32(tmp2, tmp2, Operand(16));
905 #else
906       OrP(tmp2, tmp1, value.gp());
907 #endif
908       AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
909       b(Condition(4), &do_again);
910       LoadU16(result.gp(), result.gp());
911 #ifdef V8_TARGET_BIG_ENDIAN
912       lrvr(result.gp(), result.gp());
913       ShiftRightU32(result.gp(), result.gp(), Operand(16));
914 #endif
915       break;
916     }
917     case StoreType::kI32Store:
918     case StoreType::kI64Store32: {
919       Label do_again;
920       bind(&do_again);
921       LoadU32(tmp1, MemOperand(ip));
922 #ifdef V8_TARGET_BIG_ENDIAN
923       lrvr(tmp2, tmp1);
924       OrP(tmp2, tmp2, value.gp());
925       lrvr(tmp2, tmp2);
926 #else
927       OrP(tmp2, tmp1, value.gp());
928 #endif
929       CmpAndSwap(tmp1, tmp2, MemOperand(ip));
930       b(Condition(4), &do_again);
931       LoadU32(result.gp(), tmp1);
932 #ifdef V8_TARGET_BIG_ENDIAN
933       lrvr(result.gp(), result.gp());
934 #endif
935       break;
936     }
937     case StoreType::kI64Store: {
938       Label do_again;
939       bind(&do_again);
940       LoadU64(tmp1, MemOperand(ip));
941 #ifdef V8_TARGET_BIG_ENDIAN
942       lrvgr(tmp2, tmp1);
943       OrP(tmp2, tmp2, value.gp());
944       lrvgr(tmp2, tmp2);
945 #else
946       OrP(tmp2, tmp1, value.gp());
947 #endif
948       CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
949       b(Condition(4), &do_again);
950       mov(result.gp(), tmp1);
951 #ifdef V8_TARGET_BIG_ENDIAN
952       lrvgr(result.gp(), result.gp());
953 #endif
954       break;
955     }
956     default:
957       UNREACHABLE();
958   }
959 }
960 
961 void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
962                                  uintptr_t offset_imm, LiftoffRegister value,
963                                  LiftoffRegister result, StoreType type) {
964   Register tmp1 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
965                                                            value, result})
966                       .gp();
967   Register tmp2 = GetUnusedRegister(kGpReg, LiftoffRegList{dst_addr, offset_reg,
968                                                            value, result, tmp1})
969                       .gp();
970 
971   if (!is_int20(offset_imm)) {
972     mov(ip, Operand(offset_imm));
973     if (offset_reg != no_reg) {
974       AddS64(ip, offset_reg);
975     }
976     offset_reg = ip;
977     offset_imm = 0;
978   }
979   lay(ip,
980       MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
981 
982   switch (type.value()) {
983     case StoreType::kI32Store8:
984     case StoreType::kI64Store8: {
985       Label do_again;
986       bind(&do_again);
987       LoadU8(tmp1, MemOperand(ip));
988       XorP(tmp2, tmp1, value.gp());
989       AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
990       b(Condition(4), &do_again);
991       LoadU8(result.gp(), result.gp());
992       break;
993     }
994     case StoreType::kI32Store16:
995     case StoreType::kI64Store16: {
996       Label do_again;
997       bind(&do_again);
998       LoadU16(tmp1, MemOperand(ip));
999 #ifdef V8_TARGET_BIG_ENDIAN
1000       lrvr(tmp2, tmp1);
1001       ShiftRightU32(tmp2, tmp2, Operand(16));
1002       XorP(tmp2, tmp2, value.gp());
1003       lrvr(tmp2, tmp2);
1004       ShiftRightU32(tmp2, tmp2, Operand(16));
1005 #else
1006       XorP(tmp2, tmp1, value.gp());
1007 #endif
1008       AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
1009       b(Condition(4), &do_again);
1010       LoadU16(result.gp(), result.gp());
1011 #ifdef V8_TARGET_BIG_ENDIAN
1012       lrvr(result.gp(), result.gp());
1013       ShiftRightU32(result.gp(), result.gp(), Operand(16));
1014 #endif
1015       break;
1016     }
1017     case StoreType::kI32Store:
1018     case StoreType::kI64Store32: {
1019       Label do_again;
1020       bind(&do_again);
1021       LoadU32(tmp1, MemOperand(ip));
1022 #ifdef V8_TARGET_BIG_ENDIAN
1023       lrvr(tmp2, tmp1);
1024       XorP(tmp2, tmp2, value.gp());
1025       lrvr(tmp2, tmp2);
1026 #else
1027       XorP(tmp2, tmp1, value.gp());
1028 #endif
1029       CmpAndSwap(tmp1, tmp2, MemOperand(ip));
1030       b(Condition(4), &do_again);
1031       LoadU32(result.gp(), tmp1);
1032 #ifdef V8_TARGET_BIG_ENDIAN
1033       lrvr(result.gp(), result.gp());
1034 #endif
1035       break;
1036     }
1037     case StoreType::kI64Store: {
1038       Label do_again;
1039       bind(&do_again);
1040       LoadU64(tmp1, MemOperand(ip));
1041 #ifdef V8_TARGET_BIG_ENDIAN
1042       lrvgr(tmp2, tmp1);
1043       XorP(tmp2, tmp2, value.gp());
1044       lrvgr(tmp2, tmp2);
1045 #else
1046       XorP(tmp2, tmp1, value.gp());
1047 #endif
1048       CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
1049       b(Condition(4), &do_again);
1050       mov(result.gp(), tmp1);
1051 #ifdef V8_TARGET_BIG_ENDIAN
1052       lrvgr(result.gp(), result.gp());
1053 #endif
1054       break;
1055     }
1056     default:
1057       UNREACHABLE();
1058   }
1059 }
1060 
1061 void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
1062                                       uintptr_t offset_imm,
1063                                       LiftoffRegister value,
1064                                       LiftoffRegister result, StoreType type) {
1065   if (!is_int20(offset_imm)) {
1066     mov(ip, Operand(offset_imm));
1067     if (offset_reg != no_reg) {
1068       AddS64(ip, offset_reg);
1069     }
1070     offset_reg = ip;
1071     offset_imm = 0;
1072   }
1073   lay(ip,
1074       MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
1075 
1076   switch (type.value()) {
1077     case StoreType::kI32Store8:
1078     case StoreType::kI64Store8: {
1079       AtomicExchangeU8(ip, value.gp(), result.gp(), r0);
1080       LoadU8(result.gp(), result.gp());
1081       break;
1082     }
1083     case StoreType::kI32Store16:
1084     case StoreType::kI64Store16: {
1085 #ifdef V8_TARGET_BIG_ENDIAN
1086       lrvr(r1, value.gp());
1087       ShiftRightU32(r1, r1, Operand(16));
1088 #else
1089       LoadU16(r1, value.gp());
1090 #endif
1091       AtomicExchangeU16(ip, r1, result.gp(), r0);
1092 #ifdef V8_TARGET_BIG_ENDIAN
1093       lrvr(result.gp(), result.gp());
1094       ShiftRightU32(result.gp(), result.gp(), Operand(16));
1095 #else
1096       LoadU16(result.gp(), result.gp());
1097 #endif
1098       break;
1099     }
1100     case StoreType::kI32Store:
1101     case StoreType::kI64Store32: {
1102 #ifdef V8_TARGET_BIG_ENDIAN
1103       lrvr(r1, value.gp());
1104 #else
1105       LoadU32(r1, value.gp());
1106 #endif
1107       Label do_cs;
1108       bind(&do_cs);
1109       cs(result.gp(), r1, MemOperand(ip));
1110       bne(&do_cs, Label::kNear);
1111 #ifdef V8_TARGET_BIG_ENDIAN
1112       lrvr(result.gp(), result.gp());
1113 #endif
1114       LoadU32(result.gp(), result.gp());
1115       break;
1116     }
1117     case StoreType::kI64Store: {
1118 #ifdef V8_TARGET_BIG_ENDIAN
1119       lrvgr(r1, value.gp());
1120 #else
1121       mov(r1, value.gp());
1122 #endif
1123       Label do_cs;
1124       bind(&do_cs);
1125       csg(result.gp(), r1, MemOperand(ip));
1126       bne(&do_cs, Label::kNear);
1127 #ifdef V8_TARGET_BIG_ENDIAN
1128       lrvgr(result.gp(), result.gp());
1129 #endif
1130       break;
1131     }
1132     default:
1133       UNREACHABLE();
1134   }
1135 }
1136 
1137 void LiftoffAssembler::AtomicCompareExchange(
1138     Register dst_addr, Register offset_reg, uintptr_t offset_imm,
1139     LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
1140     StoreType type) {
1141   if (!is_int20(offset_imm)) {
1142     mov(ip, Operand(offset_imm));
1143     if (offset_reg != no_reg) {
1144       AddS64(ip, offset_reg);
1145     }
1146     offset_reg = ip;
1147     offset_imm = 0;
1148   }
1149   lay(ip,
1150       MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
1151 
1152   switch (type.value()) {
1153     case StoreType::kI32Store8:
1154     case StoreType::kI64Store8: {
1155       AtomicCmpExchangeU8(ip, result.gp(), expected.gp(), new_value.gp(), r0,
1156                           r1);
1157       LoadU8(result.gp(), result.gp());
1158       break;
1159     }
1160     case StoreType::kI32Store16:
1161     case StoreType::kI64Store16: {
1162       Push(r2, r3);
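      // r2 and r3 are used as scratch for the expected and new values below;
      // preserve the caller's contents across the exchange.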
1163 #ifdef V8_TARGET_BIG_ENDIAN
1164       lrvr(r2, expected.gp());
1165       lrvr(r3, new_value.gp());
1166       ShiftRightU32(r2, r2, Operand(16));
1167       ShiftRightU32(r3, r3, Operand(16));
1168 #else
1169       LoadU16(r2, expected.gp());
1170       LoadU16(r3, new_value.gp());
1171 #endif
1172       AtomicCmpExchangeU16(ip, result.gp(), r2, r3, r0, r1);
1173       LoadU16(result.gp(), result.gp());
1174 #ifdef V8_TARGET_BIG_ENDIAN
1175       lrvr(result.gp(), result.gp());
1176       ShiftRightU32(result.gp(), result.gp(), Operand(16));
1177 #endif
1178       Pop(r2, r3);
1179       break;
1180     }
1181     case StoreType::kI32Store:
1182     case StoreType::kI64Store32: {
1183       Push(r2, r3);
1184 #ifdef V8_TARGET_BIG_ENDIAN
1185       lrvr(r2, expected.gp());
1186       lrvr(r3, new_value.gp());
1187 #else
1188       LoadU32(r2, expected.gp());
1189       LoadU32(r3, new_value.gp());
1190 #endif
1191       CmpAndSwap(r2, r3, MemOperand(ip));
1192       LoadU32(result.gp(), r2);
1193 #ifdef V8_TARGET_BIG_ENDIAN
1194       lrvr(result.gp(), result.gp());
1195 #endif
1196       Pop(r2, r3);
1197       break;
1198     }
1199     case StoreType::kI64Store: {
1200       Push(r2, r3);
1201 #ifdef V8_TARGET_BIG_ENDIAN
1202       lrvgr(r2, expected.gp());
1203       lrvgr(r3, new_value.gp());
1204 #else
1205       mov(r2, expected.gp());
1206       mov(r3, new_value.gp());
1207 #endif
1208       CmpAndSwap64(r2, r3, MemOperand(ip));
1209       mov(result.gp(), r2);
1210 #ifdef V8_TARGET_BIG_ENDIAN
1211       lrvgr(result.gp(), result.gp());
1212 #endif
1213       Pop(r2, r3);
1214       break;
1215     }
1216     default:
1217       UNREACHABLE();
1218   }
1219 }
1220 
1221 void LiftoffAssembler::AtomicFence() { bailout(kAtomics, "AtomicFence"); }
1222 
1223 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
1224                                            uint32_t caller_slot_idx,
1225                                            ValueKind kind) {
1226   int32_t offset = (caller_slot_idx + 1) * 8;
1227   switch (kind) {
1228     case kI32: {
1229 #if defined(V8_TARGET_BIG_ENDIAN)
1230       LoadS32(dst.gp(), MemOperand(fp, offset + 4));
1231       break;
1232 #else
1233       LoadS32(dst.gp(), MemOperand(fp, offset));
1234       break;
1235 #endif
1236     }
1237     case kRef:
1238     case kRtt:
1239     case kOptRef:
1240     case kI64: {
1241       LoadU64(dst.gp(), MemOperand(fp, offset));
1242       break;
1243     }
1244     case kF32: {
1245       LoadF32(dst.fp(), MemOperand(fp, offset));
1246       break;
1247     }
1248     case kF64: {
1249       LoadF64(dst.fp(), MemOperand(fp, offset));
1250       break;
1251     }
1252     case kS128: {
1253       UseScratchRegisterScope temps(this);
1254       Register scratch = temps.Acquire();
1255       LoadV128(dst.fp(), MemOperand(fp, offset), scratch);
1256       break;
1257     }
1258     default:
1259       UNREACHABLE();
1260   }
1261 }
1262 
1263 void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
1264                                             uint32_t caller_slot_idx,
1265                                             ValueKind kind) {
1266   int32_t offset = (caller_slot_idx + 1) * 8;
1267   switch (kind) {
1268     case kI32: {
1269 #if defined(V8_TARGET_BIG_ENDIAN)
1270       StoreU32(src.gp(), MemOperand(fp, offset + 4));
1271       break;
1272 #else
1273       StoreU32(src.gp(), MemOperand(fp, offset));
1274       break;
1275 #endif
1276     }
1277     case kRef:
1278     case kRtt:
1279     case kOptRef:
1280     case kI64: {
1281       StoreU64(src.gp(), MemOperand(fp, offset));
1282       break;
1283     }
1284     case kF32: {
1285       StoreF32(src.fp(), MemOperand(fp, offset));
1286       break;
1287     }
1288     case kF64: {
1289       StoreF64(src.fp(), MemOperand(fp, offset));
1290       break;
1291     }
1292     case kS128: {
1293       UseScratchRegisterScope temps(this);
1294       Register scratch = temps.Acquire();
1295       StoreV128(src.fp(), MemOperand(fp, offset), scratch);
1296       break;
1297     }
1298     default:
1299       UNREACHABLE();
1300   }
1301 }
1302 
1303 void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
1304                                            ValueKind kind) {
1305   switch (kind) {
1306     case kI32: {
1307 #if defined(V8_TARGET_BIG_ENDIAN)
1308       LoadS32(dst.gp(), MemOperand(sp, offset + 4));
1309       break;
1310 #else
1311       LoadS32(dst.gp(), MemOperand(sp, offset));
1312       break;
1313 #endif
1314     }
1315     case kRef:
1316     case kRtt:
1317     case kOptRef:
1318     case kI64: {
1319       LoadU64(dst.gp(), MemOperand(sp, offset));
1320       break;
1321     }
1322     case kF32: {
1323       LoadF32(dst.fp(), MemOperand(sp, offset));
1324       break;
1325     }
1326     case kF64: {
1327       LoadF64(dst.fp(), MemOperand(sp, offset));
1328       break;
1329     }
1330     case kS128: {
1331       UseScratchRegisterScope temps(this);
1332       Register scratch = temps.Acquire();
1333       LoadV128(dst.fp(), MemOperand(sp, offset), scratch);
1334       break;
1335     }
1336     default:
1337       UNREACHABLE();
1338   }
1339 }
1340 
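// On big-endian targets a 32-bit value occupies only half of its 8-byte stack
// slot, so 32-bit spills, fills and stack moves bias the slot offset to hit
// the correct half; little-endian targets need no adjustment.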
1341 #ifdef V8_TARGET_BIG_ENDIAN
1342 constexpr int stack_bias = -4;
1343 #else
1344 constexpr int stack_bias = 0;
1345 #endif
1346 
1347 void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
1348                                       ValueKind kind) {
1349   DCHECK_NE(dst_offset, src_offset);
1350   int length = 0;
1351   switch (kind) {
1352     case kI32:
1353     case kF32:
1354       length = 4;
1355       break;
1356     case kI64:
1357     case kOptRef:
1358     case kRef:
1359     case kRtt:
1360     case kF64:
1361       length = 8;
1362       break;
1363     case kS128:
1364       length = 16;
1365       break;
1366     default:
1367       UNREACHABLE();
1368   }
1369 
1370   dst_offset += (length == 4 ? stack_bias : 0);
1371   src_offset += (length == 4 ? stack_bias : 0);
1372 
1373   if (is_int20(dst_offset)) {
1374     lay(ip, liftoff::GetStackSlot(dst_offset));
1375   } else {
1376     mov(ip, Operand(-dst_offset));
1377     lay(ip, MemOperand(fp, ip));
1378   }
1379 
1380   if (is_int20(src_offset)) {
1381     lay(r1, liftoff::GetStackSlot(src_offset));
1382   } else {
1383     mov(r1, Operand(-src_offset));
1384     lay(r1, MemOperand(fp, r1));
1385   }
1386 
1387   MoveChar(MemOperand(ip), MemOperand(r1), Operand(length));
1388 }
1389 
1390 void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
1391   mov(dst, src);
1392 }
1393 
1394 void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
1395                             ValueKind kind) {
1396   DCHECK_NE(dst, src);
1397   if (kind == kF32) {
1398     ler(dst, src);
1399   } else if (kind == kF64) {
1400     ldr(dst, src);
1401   } else {
1402     DCHECK_EQ(kS128, kind);
1403     vlr(dst, src, Condition(0), Condition(0), Condition(0));
1404   }
1405 }
1406 
1407 void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
1408   DCHECK_LT(0, offset);
1409   RecordUsedSpillOffset(offset);
1410 
1411   switch (kind) {
1412     case kI32:
1413       StoreU32(reg.gp(), liftoff::GetStackSlot(offset + stack_bias));
1414       break;
1415     case kI64:
1416     case kOptRef:
1417     case kRef:
1418     case kRtt:
1419       StoreU64(reg.gp(), liftoff::GetStackSlot(offset));
1420       break;
1421     case kF32:
1422       StoreF32(reg.fp(), liftoff::GetStackSlot(offset + stack_bias));
1423       break;
1424     case kF64:
1425       StoreF64(reg.fp(), liftoff::GetStackSlot(offset));
1426       break;
1427     case kS128: {
1428       UseScratchRegisterScope temps(this);
1429       Register scratch = temps.Acquire();
1430       StoreV128(reg.fp(), liftoff::GetStackSlot(offset), scratch);
1431       break;
1432     }
1433     default:
1434       UNREACHABLE();
1435   }
1436 }
1437 
1438 void LiftoffAssembler::Spill(int offset, WasmValue value) {
1439   RecordUsedSpillOffset(offset);
1440   UseScratchRegisterScope temps(this);
1441   Register src = no_reg;
1442   src = ip;
1443   switch (value.type().kind()) {
1444     case kI32: {
1445       mov(src, Operand(value.to_i32()));
1446       StoreU32(src, liftoff::GetStackSlot(offset + stack_bias));
1447       break;
1448     }
1449     case kI64: {
1450       mov(src, Operand(value.to_i64()));
1451       StoreU64(src, liftoff::GetStackSlot(offset));
1452       break;
1453     }
1454     default:
1455       // We do not track f32 and f64 constants, hence they are unreachable.
1456       UNREACHABLE();
1457   }
1458 }
1459 
1460 void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
1461   switch (kind) {
1462     case kI32:
1463       LoadS32(reg.gp(), liftoff::GetStackSlot(offset + stack_bias));
1464       break;
1465     case kI64:
1466     case kRef:
1467     case kOptRef:
1468     case kRtt:
1469       LoadU64(reg.gp(), liftoff::GetStackSlot(offset));
1470       break;
1471     case kF32:
1472       LoadF32(reg.fp(), liftoff::GetStackSlot(offset + stack_bias));
1473       break;
1474     case kF64:
1475       LoadF64(reg.fp(), liftoff::GetStackSlot(offset));
1476       break;
1477     case kS128: {
1478       UseScratchRegisterScope temps(this);
1479       Register scratch = temps.Acquire();
1480       LoadV128(reg.fp(), liftoff::GetStackSlot(offset), scratch);
1481       break;
1482     }
1483     default:
1484       UNREACHABLE();
1485   }
1486 }
1487 
1488 void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
1489   UNREACHABLE();
1490 }
1491 
1492 void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
1493   DCHECK_LT(0, size);
1494   DCHECK_EQ(0, size % 4);
1495   RecordUsedSpillOffset(start + size);
1496 
1497   // We need a zero reg. Always use r0 for that, and push it before to restore
1498   // its value afterwards.
1499   push(r0);
1500   mov(r0, Operand(0));
1501 
1502   if (size <= 5 * kStackSlotSize) {
1503     // Special straight-line code for up to five slots. Generates two
1504     // instructions per slot.
1505     uint32_t remainder = size;
1506     for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
1507       StoreU64(r0, liftoff::GetStackSlot(start + remainder));
1508     }
1509     DCHECK(remainder == 4 || remainder == 0);
1510     if (remainder) {
1511       StoreU32(r0, liftoff::GetStackSlot(start + remainder));
1512     }
1513   } else {
1514     // General case for bigger counts (9 instructions).
1515     // Use r3 for start address (inclusive), r4 for end address (exclusive).
1516     push(r3);
1517     push(r4);
1518 
1519     lay(r3, MemOperand(fp, -start - size));
1520     lay(r4, MemOperand(fp, -start));
1521 
1522     Label loop;
1523     bind(&loop);
1524     StoreU64(r0, MemOperand(r3));
1525     lay(r3, MemOperand(r3, kSystemPointerSize));
1526     CmpU64(r3, r4);
1527     bne(&loop);
1528     pop(r4);
1529     pop(r3);
1530   }
1531 
1532   pop(r0);
1533 }
1534 
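// Helpers for the macro-generated op lists below: SIGN_EXT re-sign-extends a
// 32-bit result to 64 bits, the *_AND_WITH_1F helpers mask shift amounts to
// the low 5 bits, and LFR_TO_REG unwraps the gp register of a LiftoffRegister.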
1535 #define SIGN_EXT(r) lgfr(r, r)
1536 #define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
1537 #define REGISTER_AND_WITH_1F    \
1538   ([&](Register rhs) {          \
1539     AndP(r1, rhs, Operand(31)); \
1540     return r1;                  \
1541   })
1542 
1543 #define LFR_TO_REG(reg) reg.gp()
1544 
1545 // V(name, instr, dtype, stype, dcast, scast, rcast, return_val, return_type)
1546 #define UNOP_LIST(V)                                                           \
1547   V(i32_popcnt, Popcnt32, Register, Register, , , USE, true, bool)             \
1548   V(i64_popcnt, Popcnt64, LiftoffRegister, LiftoffRegister, LFR_TO_REG,        \
1549     LFR_TO_REG, USE, true, bool)                                               \
1550   V(u32_to_uintptr, LoadU32, Register, Register, , , USE, , void)              \
1551   V(i32_signextend_i8, lbr, Register, Register, , , USE, , void)               \
1552   V(i32_signextend_i16, lhr, Register, Register, , , USE, , void)              \
1553   V(i64_signextend_i8, lgbr, LiftoffRegister, LiftoffRegister, LFR_TO_REG,     \
1554     LFR_TO_REG, USE, , void)                                                   \
1555   V(i64_signextend_i16, lghr, LiftoffRegister, LiftoffRegister, LFR_TO_REG,    \
1556     LFR_TO_REG, USE, , void)                                                   \
1557   V(i64_signextend_i32, LoadS32, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
1558     LFR_TO_REG, USE, , void)                                                   \
1559   V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE, , void)        \
1560   V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE, , void)       \
1561   V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister,           \
1562     LFR_TO_REG, LFR_TO_REG, USE, , void)                                       \
1563   V(i64_ctz, CountTrailingZerosU64, LiftoffRegister, LiftoffRegister,          \
1564     LFR_TO_REG, LFR_TO_REG, USE, , void)                                       \
1565   V(f32_ceil, CeilF32, DoubleRegister, DoubleRegister, , , USE, true, bool)    \
1566   V(f32_floor, FloorF32, DoubleRegister, DoubleRegister, , , USE, true, bool)  \
1567   V(f32_trunc, TruncF32, DoubleRegister, DoubleRegister, , , USE, true, bool)  \
1568   V(f32_nearest_int, NearestIntF32, DoubleRegister, DoubleRegister, , , USE,   \
1569     true, bool)                                                                \
1570   V(f32_abs, lpebr, DoubleRegister, DoubleRegister, , , USE, , void)           \
1571   V(f32_neg, lcebr, DoubleRegister, DoubleRegister, , , USE, , void)           \
1572   V(f32_sqrt, sqebr, DoubleRegister, DoubleRegister, , , USE, , void)          \
1573   V(f64_ceil, CeilF64, DoubleRegister, DoubleRegister, , , USE, true, bool)    \
1574   V(f64_floor, FloorF64, DoubleRegister, DoubleRegister, , , USE, true, bool)  \
1575   V(f64_trunc, TruncF64, DoubleRegister, DoubleRegister, , , USE, true, bool)  \
1576   V(f64_nearest_int, NearestIntF64, DoubleRegister, DoubleRegister, , , USE,   \
1577     true, bool)                                                                \
1578   V(f64_abs, lpdbr, DoubleRegister, DoubleRegister, , , USE, , void)           \
1579   V(f64_neg, lcdbr, DoubleRegister, DoubleRegister, , , USE, , void)           \
1580   V(f64_sqrt, sqdbr, DoubleRegister, DoubleRegister, , , USE, , void)
1581 
1582 #define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast, \
1583                            ret, return_type)                               \
1584   return_type LiftoffAssembler::emit_##name(dtype dst, stype src) {        \
1585     auto _dst = dcast(dst);                                                \
1586     auto _src = scast(src);                                                \
1587     instr(_dst, _src);                                                     \
1588     rcast(_dst);                                                           \
1589     return ret;                                                            \
1590   }
1591 UNOP_LIST(EMIT_UNOP_FUNCTION)
1592 #undef EMIT_UNOP_FUNCTION
1593 #undef UNOP_LIST
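// Illustration only (a sketch, not generated code): the f32_ceil entry in
// the list above expands to roughly
//
//   bool LiftoffAssembler::emit_f32_ceil(DoubleRegister dst,
//                                        DoubleRegister src) {
//     auto _dst = (dst);
//     auto _src = (src);
//     CeilF32(_dst, _src);
//     USE(_dst);
//     return true;
//   }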
1594 
1595 // V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast,
1596 // return_val, return_type)
1597 #define BINOP_LIST(V)                                                          \
1598   V(f32_min, FloatMin, DoubleRegister, DoubleRegister, DoubleRegister, , , ,   \
1599     USE, , void)                                                               \
1600   V(f32_max, FloatMax, DoubleRegister, DoubleRegister, DoubleRegister, , , ,   \
1601     USE, , void)                                                               \
1602   V(f64_min, DoubleMin, DoubleRegister, DoubleRegister, DoubleRegister, , , ,  \
1603     USE, , void)                                                               \
1604   V(f64_max, DoubleMax, DoubleRegister, DoubleRegister, DoubleRegister, , , ,  \
1605     USE, , void)                                                               \
1606   V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
1607     USE, , void)                                                               \
1608   V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
1609     USE, , void)                                                               \
1610   V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
1611     USE, , void)                                                               \
1612   V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
1613     USE, , void)                                                               \
1614   V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
1615     USE, , void)                                                               \
1616   V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
1617     USE, , void)                                                               \
1618   V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
1619     USE, , void)                                                               \
1620   V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , ,     \
1621     USE, , void)                                                               \
1622   V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , ,                   \
1623     INT32_AND_WITH_1F, SIGN_EXT, , void)                                       \
1624   V(i32_sari, ShiftRightS32, Register, Register, int32_t, , ,                  \
1625     INT32_AND_WITH_1F, SIGN_EXT, , void)                                       \
1626   V(i32_shri, ShiftRightU32, Register, Register, int32_t, , ,                  \
1627     INT32_AND_WITH_1F, SIGN_EXT, , void)                                       \
1628   V(i32_shl, ShiftLeftU32, Register, Register, Register, , ,                   \
1629     REGISTER_AND_WITH_1F, SIGN_EXT, , void)                                    \
1630   V(i32_sar, ShiftRightS32, Register, Register, Register, , ,                  \
1631     REGISTER_AND_WITH_1F, SIGN_EXT, , void)                                    \
1632   V(i32_shr, ShiftRightU32, Register, Register, Register, , ,                  \
1633     REGISTER_AND_WITH_1F, SIGN_EXT, , void)                                    \
1634   V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, SIGN_EXT, ,    \
1635     void)                                                                      \
1636   V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, SIGN_EXT, ,    \
1637     void)                                                                      \
1638   V(i32_andi, And, Register, Register, int32_t, , , Operand, SIGN_EXT, , void) \
1639   V(i32_ori, Or, Register, Register, int32_t, , , Operand, SIGN_EXT, , void)   \
1640   V(i32_xori, Xor, Register, Register, int32_t, , , Operand, SIGN_EXT, , void) \
1641   V(i32_add, AddS32, Register, Register, Register, , , , SIGN_EXT, , void)     \
1642   V(i32_sub, SubS32, Register, Register, Register, , , , SIGN_EXT, , void)     \
1643   V(i32_and, And, Register, Register, Register, , , , SIGN_EXT, , void)        \
1644   V(i32_or, Or, Register, Register, Register, , , , SIGN_EXT, , void)          \
1645   V(i32_xor, Xor, Register, Register, Register, , , , SIGN_EXT, , void)        \
1646   V(i32_mul, MulS32, Register, Register, Register, , , , SIGN_EXT, , void)     \
1647   V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister,        \
1648     LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
1649   V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister,        \
1650     LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
1651   V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister,        \
1652     LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
1653   V(i64_and, AndP, LiftoffRegister, LiftoffRegister, LiftoffRegister,          \
1654     LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
1655   V(i64_or, OrP, LiftoffRegister, LiftoffRegister, LiftoffRegister,            \
1656     LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
1657   V(i64_xor, XorP, LiftoffRegister, LiftoffRegister, LiftoffRegister,          \
1658     LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)                           \
1659   V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register,         \
1660     LFR_TO_REG, LFR_TO_REG, , USE, , void)                                     \
1661   V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register,        \
1662     LFR_TO_REG, LFR_TO_REG, , USE, , void)                                     \
1663   V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register,        \
1664     LFR_TO_REG, LFR_TO_REG, , USE, , void)                                     \
1665   V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG,   \
1666     LFR_TO_REG, Operand, USE, , void)                                          \
1667   V(i64_andi, AndP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG,     \
1668     LFR_TO_REG, Operand, USE, , void)                                          \
1669   V(i64_ori, OrP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG,       \
1670     LFR_TO_REG, Operand, USE, , void)                                          \
1671   V(i64_xori, XorP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG,     \
1672     LFR_TO_REG, Operand, USE, , void)                                          \
1673   V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t,         \
1674     LFR_TO_REG, LFR_TO_REG, Operand, USE, , void)                              \
1675   V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t,        \
1676     LFR_TO_REG, LFR_TO_REG, Operand, USE, , void)                              \
1677   V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t,        \
1678     LFR_TO_REG, LFR_TO_REG, Operand, USE, , void)
1679 
1680 #define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, \
1681                             scast2, rcast, ret, return_type)                   \
1682   return_type LiftoffAssembler::emit_##name(dtype dst, stype1 lhs,             \
1683                                             stype2 rhs) {                      \
1684     auto _dst = dcast(dst);                                                    \
1685     auto _lhs = scast1(lhs);                                                   \
1686     auto _rhs = scast2(rhs);                                                   \
1687     instr(_dst, _lhs, _rhs);                                                   \
1688     rcast(_dst);                                                               \
1689     return ret;                                                                \
1690   }
1691 
1692 BINOP_LIST(EMIT_BINOP_FUNCTION)
1693 #undef BINOP_LIST
1694 #undef EMIT_BINOP_FUNCTION
1695 #undef SIGN_EXT
1696 #undef INT32_AND_WITH_1F
1697 #undef REGISTER_AND_WITH_1F
1698 #undef LFR_TO_REG
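// Illustration only (a sketch, not generated code): the i32_shl entry
// expands to roughly
//
//   void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs,
//                                       Register rhs) {
//     auto _dst = (dst);
//     auto _lhs = (lhs);
//     auto _rhs = [&](Register rhs) {   // REGISTER_AND_WITH_1F
//       AndP(r1, rhs, Operand(31));     // wasm shift counts are taken mod 32
//       return r1;
//     }(rhs);
//     ShiftLeftU32(_dst, _lhs, _rhs);
//     lgfr(_dst, _dst);                 // SIGN_EXT the 32-bit result
//   }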
1699 
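// Increments the Smi stored at MemOperand(dst.gp(), offset) in place; with
// pointer compression the field is a 32-bit Smi, otherwise a full word.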
1700 void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) {
1701   UseScratchRegisterScope temps(this);
1702   if (COMPRESS_POINTERS_BOOL) {
1703     DCHECK(SmiValuesAre31Bits());
1704     Register scratch = temps.Acquire();
1705     LoadS32(scratch, MemOperand(dst.gp(), offset));
1706     AddU32(scratch, Operand(Smi::FromInt(1)));
1707     StoreU32(scratch, MemOperand(dst.gp(), offset));
1708   } else {
1709     Register scratch = temps.Acquire();
1710     SmiUntag(scratch, MemOperand(dst.gp(), offset));
1711     AddU64(scratch, Operand(1));
1712     SmiTag(scratch);
1713     StoreU64(scratch, MemOperand(dst.gp(), offset));
1714   }
1715 }
1716 
1717 void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
1718                                      Label* trap_div_by_zero,
1719                                      Label* trap_div_unrepresentable) {
1720   Label cont;
1721 
1722   // Check for division by zero.
1723   ltr(r0, rhs);
1724   b(eq, trap_div_by_zero);
1725 
1726   // Check for kMinInt / -1. This is unrepresentable.
1727   CmpS32(rhs, Operand(-1));
1728   bne(&cont);
1729   CmpS32(lhs, Operand(kMinInt));
1730   b(eq, trap_div_unrepresentable);
1731 
1732   bind(&cont);
1733   DivS32(dst, lhs, rhs);
1734 }
1735 
1736 void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
1737                                      Label* trap_div_by_zero) {
1738   // Check for division by zero.
1739   ltr(r0, rhs);
1740   beq(trap_div_by_zero);
1741   DivU32(dst, lhs, rhs);
1742 }
1743 
1744 void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
1745                                      Label* trap_div_by_zero) {
1746   Label cont;
1747   Label done;
1748   Label trap_div_unrepresentable;
1749   // Check for division by zero.
1750   ltr(r0, rhs);
1751   beq(trap_div_by_zero);
1752 
1753   // Check kMinInt/-1 case.
1754   CmpS32(rhs, Operand(-1));
1755   bne(&cont);
1756   CmpS32(lhs, Operand(kMinInt));
1757   beq(&trap_div_unrepresentable);
1758 
1759   // Continue the normal calculation.
1760   bind(&cont);
1761   ModS32(dst, lhs, rhs);
1762   bne(&done);
1763 
1764   // kMinInt / -1 case: the remainder is simply 0.
1765   bind(&trap_div_unrepresentable);
1766   mov(dst, Operand(0));
1767   bind(&done);
1768 }
1769 
1770 void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
1771                                      Label* trap_div_by_zero) {
1772   // Check for division by zero.
1773   ltr(r0, rhs);
1774   beq(trap_div_by_zero);
1775   ModU32(dst, lhs, rhs);
1776 }
1777 
1778 bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
1779                                      LiftoffRegister rhs,
1780                                      Label* trap_div_by_zero,
1781                                      Label* trap_div_unrepresentable) {
1782   // Use r0 to check for kMinInt / -1.
1783   constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
1784   Label cont;
1785   // Check for division by zero.
1786   ltgr(r0, rhs.gp());
1787   beq(trap_div_by_zero);
1788 
1789   // Check for kMinInt / -1. This is unrepresentable.
1790   CmpS64(rhs.gp(), Operand(-1));
1791   bne(&cont);
1792   mov(r0, Operand(kMinInt64));
1793   CmpS64(lhs.gp(), r0);
1794   b(eq, trap_div_unrepresentable);
1795 
1796   bind(&cont);
1797   DivS64(dst.gp(), lhs.gp(), rhs.gp());
1798   return true;
1799 }
1800 
1801 bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
1802                                      LiftoffRegister rhs,
1803                                      Label* trap_div_by_zero) {
1804   ltgr(r0, rhs.gp());
1805   b(eq, trap_div_by_zero);
1806   // Do div.
1807   DivU64(dst.gp(), lhs.gp(), rhs.gp());
1808   return true;
1809 }
1810 
1811 bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
1812                                      LiftoffRegister rhs,
1813                                      Label* trap_div_by_zero) {
1814   constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
1815 
1816   Label trap_div_unrepresentable;
1817   Label done;
1818   Label cont;
1819 
1820   // Check for division by zero.
1821   ltgr(r0, rhs.gp());
1822   beq(trap_div_by_zero);
1823 
1824   // Check for kMinInt / -1. This is unrepresentable.
1825   CmpS64(rhs.gp(), Operand(-1));
1826   bne(&cont);
1827   mov(r0, Operand(kMinInt64));
1828   CmpS64(lhs.gp(), r0);
1829   beq(&trap_div_unrepresentable);
1830 
1831   bind(&cont);
1832   ModS64(dst.gp(), lhs.gp(), rhs.gp());
1833   bne(&done);
1834 
1835   bind(&trap_div_unrepresentable);
1836   mov(dst.gp(), Operand(0));
1837   bind(&done);
1838   return true;
1839 }
1840 
1841 bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
1842                                      LiftoffRegister rhs,
1843                                      Label* trap_div_by_zero) {
1844   // Check for division by zero.
1845   ltgr(r0, rhs.gp());
1846   beq(trap_div_by_zero);
1847   ModU64(dst.gp(), lhs.gp(), rhs.gp());
1848   return true;
1849 }
1850 
1851 void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
1852                                          DoubleRegister rhs) {
1853   constexpr uint64_t kF64SignBit = uint64_t{1} << 63;
1854   UseScratchRegisterScope temps(this);
1855   Register scratch2 = temps.Acquire();
1856   MovDoubleToInt64(r0, lhs);
1857   // Clear sign bit in {r0}.
1858   AndP(r0, Operand(~kF64SignBit));
1859 
1860   MovDoubleToInt64(scratch2, rhs);
1861   // Isolate sign bit in {scratch2}.
1862   AndP(scratch2, Operand(kF64SignBit));
1863   // Combine {scratch2} into {r0}.
1864   OrP(r0, r0, scratch2);
1865   MovInt64ToDouble(dst, r0);
1866 }
1867 
1868 void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
1869                                          DoubleRegister rhs) {
1870   constexpr uint64_t kF64SignBit = uint64_t{1} << 63;
1871   UseScratchRegisterScope temps(this);
1872   Register scratch2 = temps.Acquire();
1873   MovDoubleToInt64(r0, lhs);
1874   // Clear sign bit in {r0}.
1875   AndP(r0, Operand(~kF64SignBit));
1876 
1877   MovDoubleToInt64(scratch2, rhs);
1878   // Isolate sign bit in {scratch2}.
1879   AndP(scratch2, Operand(kF64SignBit));
1880   // Combine {scratch2} into {r0}.
1881   OrP(r0, r0, scratch2);
1882   MovInt64ToDouble(dst, r0);
1883 }
1884 
1885 bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
1886                                             LiftoffRegister dst,
1887                                             LiftoffRegister src, Label* trap) {
1888   switch (opcode) {
1889     case kExprI32ConvertI64:
1890       lgfr(dst.gp(), src.gp());
1891       return true;
1892     case kExprI32SConvertF32: {
1893       ConvertFloat32ToInt32(dst.gp(), src.fp(),
1894                             kRoundToZero);  // f32 -> i32 round to zero.
1895       b(Condition(1), trap);
1896       return true;
1897     }
1898     case kExprI32UConvertF32: {
1899       ConvertFloat32ToUnsignedInt32(dst.gp(), src.fp(), kRoundToZero);
1900       b(Condition(1), trap);
1901       return true;
1902     }
1903     case kExprI32SConvertF64: {
1904       ConvertDoubleToInt32(dst.gp(), src.fp());
1905       b(Condition(1), trap);
1906       return true;
1907     }
1908     case kExprI32UConvertF64: {
1909       ConvertDoubleToUnsignedInt32(dst.gp(), src.fp(), kRoundToZero);
1910       b(Condition(1), trap);
1911       return true;
1912     }
1913     case kExprI32SConvertSatF32: {
1914       Label done, src_is_nan;
1915       lzer(kScratchDoubleReg);
1916       cebr(src.fp(), kScratchDoubleReg);
1917       b(Condition(1), &src_is_nan);
1918 
1919       // source is a finite number
1920       ConvertFloat32ToInt32(dst.gp(), src.fp(),
1921                             kRoundToZero);  // f32 -> i32 round to zero.
1922       b(&done);
1923 
1924       bind(&src_is_nan);
1925       lghi(dst.gp(), Operand::Zero());
1926 
1927       bind(&done);
1928       return true;
1929     }
1930     case kExprI32UConvertSatF32: {
1931       Label done, src_is_nan;
1932       lzer(kScratchDoubleReg);
1933       cebr(src.fp(), kScratchDoubleReg);
1934       b(Condition(1), &src_is_nan);
1935 
1936       // source is a finite number
1937       ConvertFloat32ToUnsignedInt32(dst.gp(), src.fp(), kRoundToZero);
1938       b(&done);
1939 
1940       bind(&src_is_nan);
1941       lghi(dst.gp(), Operand::Zero());
1942 
1943       bind(&done);
1944       return true;
1945     }
1946     case kExprI32SConvertSatF64: {
1947       Label done, src_is_nan;
1948       lzdr(kScratchDoubleReg, r0);
1949       cdbr(src.fp(), kScratchDoubleReg);
1950       b(Condition(1), &src_is_nan);
1951 
1952       ConvertDoubleToInt32(dst.gp(), src.fp());
1953       b(&done);
1954 
1955       bind(&src_is_nan);
1956       lghi(dst.gp(), Operand::Zero());
1957 
1958       bind(&done);
1959       return true;
1960     }
1961     case kExprI32UConvertSatF64: {
1962       Label done, src_is_nan;
1963       lzdr(kScratchDoubleReg, r0);
1964       cdbr(src.fp(), kScratchDoubleReg);
1965       b(Condition(1), &src_is_nan);
1966 
1967       ConvertDoubleToUnsignedInt32(dst.gp(), src.fp());
1968       b(&done);
1969 
1970       bind(&src_is_nan);
1971       lghi(dst.gp(), Operand::Zero());
1972 
1973       bind(&done);
1974       return true;
1975     }
1976     case kExprI32ReinterpretF32:
1977       lgdr(dst.gp(), src.fp());
1978       srlg(dst.gp(), dst.gp(), Operand(32));
1979       return true;
1980     case kExprI64SConvertI32:
1981       LoadS32(dst.gp(), src.gp());
1982       return true;
1983     case kExprI64UConvertI32:
1984       llgfr(dst.gp(), src.gp());
1985       return true;
1986     case kExprI64ReinterpretF64:
1987       lgdr(dst.gp(), src.fp());
1988       return true;
1989     case kExprF32SConvertI32: {
1990       ConvertIntToFloat(dst.fp(), src.gp());
1991       return true;
1992     }
1993     case kExprF32UConvertI32: {
1994       ConvertUnsignedIntToFloat(dst.fp(), src.gp());
1995       return true;
1996     }
1997     case kExprF32ConvertF64:
1998       ledbr(dst.fp(), src.fp());
1999       return true;
2000     case kExprF32ReinterpretI32: {
2001       sllg(r0, src.gp(), Operand(32));
2002       ldgr(dst.fp(), r0);
2003       return true;
2004     }
2005     case kExprF64SConvertI32: {
2006       ConvertIntToDouble(dst.fp(), src.gp());
2007       return true;
2008     }
2009     case kExprF64UConvertI32: {
2010       ConvertUnsignedIntToDouble(dst.fp(), src.gp());
2011       return true;
2012     }
2013     case kExprF64ConvertF32:
2014       ldebr(dst.fp(), src.fp());
2015       return true;
2016     case kExprF64ReinterpretI64:
2017       ldgr(dst.fp(), src.gp());
2018       return true;
2019     case kExprF64SConvertI64:
2020       ConvertInt64ToDouble(dst.fp(), src.gp());
2021       return true;
2022     case kExprF64UConvertI64:
2023       ConvertUnsignedInt64ToDouble(dst.fp(), src.gp());
2024       return true;
2025     case kExprI64SConvertF32: {
2026       ConvertFloat32ToInt64(dst.gp(), src.fp());  // f32 -> i64 round to zero.
2027       b(Condition(1), trap);
2028       return true;
2029     }
2030     case kExprI64UConvertF32: {
2031       ConvertFloat32ToUnsignedInt64(dst.gp(),
2032                                     src.fp());  // f32 -> i64 round to zero.
2033       b(Condition(1), trap);
2034       return true;
2035     }
2036     case kExprF32SConvertI64:
2037       ConvertInt64ToFloat(dst.fp(), src.gp());
2038       return true;
2039     case kExprF32UConvertI64:
2040       ConvertUnsignedInt64ToFloat(dst.fp(), src.gp());
2041       return true;
2042     case kExprI64SConvertF64: {
2043       ConvertDoubleToInt64(dst.gp(), src.fp());  // f64 -> i64 round to zero.
2044       b(Condition(1), trap);
2045       return true;
2046     }
2047     case kExprI64UConvertF64: {
2048       ConvertDoubleToUnsignedInt64(dst.gp(),
2049                                    src.fp());  // f64 -> i64 round to zero.
2050       b(Condition(1), trap);
2051       return true;
2052     }
2053     case kExprI64SConvertSatF32: {
2054       Label done, src_is_nan;
2055       lzer(kScratchDoubleReg);
2056       cebr(src.fp(), kScratchDoubleReg);
2057       b(Condition(1), &src_is_nan);
2058 
2059       // source is a finite number
2060       ConvertFloat32ToInt64(dst.gp(), src.fp());  // f32 -> i64 round to zero.
2061       b(&done);
2062 
2063       bind(&src_is_nan);
2064       lghi(dst.gp(), Operand::Zero());
2065 
2066       bind(&done);
2067       return true;
2068     }
2069     case kExprI64UConvertSatF32: {
2070       Label done, src_is_nan;
2071       lzer(kScratchDoubleReg);
2072       cebr(src.fp(), kScratchDoubleReg);
2073       b(Condition(1), &src_is_nan);
2074 
2075       // source is a finite number
2076       ConvertFloat32ToUnsignedInt64(dst.gp(),
2077                                     src.fp());  // f32 -> i64 round to zero.
2078       b(&done);
2079 
2080       bind(&src_is_nan);
2081       lghi(dst.gp(), Operand::Zero());
2082 
2083       bind(&done);
2084       return true;
2085     }
2086     case kExprI64SConvertSatF64: {
2087       Label done, src_is_nan;
2088       lzdr(kScratchDoubleReg, r0);
2089       cdbr(src.fp(), kScratchDoubleReg);
2090       b(Condition(1), &src_is_nan);
2091 
2092       ConvertDoubleToInt64(dst.gp(), src.fp());  // f64 -> i64 round to zero.
2093       b(&done);
2094 
2095       bind(&src_is_nan);
2096       lghi(dst.gp(), Operand::Zero());
2097 
2098       bind(&done);
2099       return true;
2100     }
2101     case kExprI64UConvertSatF64: {
2102       Label done, src_is_nan;
2103       lzdr(kScratchDoubleReg, r0);
2104       cdbr(src.fp(), kScratchDoubleReg);
2105       b(Condition(1), &src_is_nan);
2106 
2107       ConvertDoubleToUnsignedInt64(dst.gp(),
2108                                    src.fp());  // f64 -> i64 round to zero.
2109       b(&done);
2110 
2111       bind(&src_is_nan);
2112       lghi(dst.gp(), Operand::Zero());
2113 
2114       bind(&done);
2115       return true;
2116     }
2117     default:
2118       UNREACHABLE();
2119   }
2120 }
2121 
2122 void LiftoffAssembler::emit_jump(Label* label) { b(al, label); }
2123 
2124 void LiftoffAssembler::emit_jump(Register target) { Jump(target); }
2125 
2126 void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
2127                                       Label* label, ValueKind kind,
2128                                       Register lhs, Register rhs) {
2129   Condition cond = liftoff::ToCondition(liftoff_cond);
2130   bool use_signed = liftoff::UseSignedOp(liftoff_cond);
2131 
2132   if (rhs != no_reg) {
2133     switch (kind) {
2134       case kI32:
2135         if (use_signed) {
2136           CmpS32(lhs, rhs);
2137         } else {
2138           CmpU32(lhs, rhs);
2139         }
2140         break;
2141       case kRef:
2142       case kOptRef:
2143       case kRtt:
2144         DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
2145         V8_FALLTHROUGH;
2146       case kI64:
2147         if (use_signed) {
2148           CmpS64(lhs, rhs);
2149         } else {
2150           CmpU64(lhs, rhs);
2151         }
2152         break;
2153       default:
2154         UNREACHABLE();
2155     }
2156   } else {
2157     DCHECK_EQ(kind, kI32);
2158     CHECK(use_signed);
2159     CmpS32(lhs, Operand::Zero());
2160   }
2161 
2162   b(cond, label);
2163 }
2164 
2165 void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
2166                                            Label* label, Register lhs,
2167                                            int32_t imm) {
2168   bool use_signed = liftoff::UseSignedOp(liftoff_cond);
2169   Condition cond = liftoff::ToCondition(liftoff_cond);
2170   if (use_signed) {
2171     CmpS32(lhs, Operand(imm));
2172   } else {
2173     CmpU32(lhs, Operand(imm));
2174   }
2175   b(cond, label);
2176 }
2177 
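// Sets {dst} to 1 if {src} is zero and to 0 otherwise; {test} is the
// load-and-test instruction (ltr/ltgr) that sets the condition code.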
2178 #define EMIT_EQZ(test, src) \
2179   {                         \
2180     Label done;             \
2181     test(r0, src);          \
2182     mov(dst, Operand(1));   \
2183     beq(&done);             \
2184     mov(dst, Operand(0));   \
2185     bind(&done);            \
2186   }
2187 
2188 void LiftoffAssembler::emit_i32_subi_jump_negative(Register value,
2189                                                    int subtrahend,
2190                                                    Label* result_negative) {
2191   SubS64(value, value, Operand(subtrahend));
2192   blt(result_negative);
2193 }
2194 
2195 void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
2196   EMIT_EQZ(ltr, src);
2197 }
2198 
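// Materializes the current condition code into {dst}: 1 if {cond} holds,
// 0 otherwise.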
2199 #define EMIT_SET_CONDITION(dst, cond) \
2200   {                                   \
2201     Label done;                       \
2202     lghi(dst, Operand(1));            \
2203     b(cond, &done);                   \
2204     lghi(dst, Operand(0));            \
2205     bind(&done);                      \
2206   }
2207 
2208 void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
2209                                          Register dst, Register lhs,
2210                                          Register rhs) {
2211   bool use_signed = liftoff::UseSignedOp(liftoff_cond);
2212   if (use_signed) {
2213     CmpS32(lhs, rhs);
2214   } else {
2215     CmpU32(lhs, rhs);
2216   }
2217 
2218   EMIT_SET_CONDITION(dst, liftoff::ToCondition(liftoff_cond));
2219 }
2220 
2221 void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
2222   EMIT_EQZ(ltgr, src.gp());
2223 }
2224 
2225 void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
2226                                          Register dst, LiftoffRegister lhs,
2227                                          LiftoffRegister rhs) {
2228   bool use_signed = liftoff::UseSignedOp(liftoff_cond);
2229   if (use_signed) {
2230     CmpS64(lhs.gp(), rhs.gp());
2231   } else {
2232     CmpU64(lhs.gp(), rhs.gp());
2233   }
2234 
2235   EMIT_SET_CONDITION(dst, liftoff::ToCondition(liftoff_cond));
2236 }
2237 
2238 void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
2239                                          Register dst, DoubleRegister lhs,
2240                                          DoubleRegister rhs) {
2241   cebr(lhs, rhs);
2242   EMIT_SET_CONDITION(dst, liftoff::ToCondition(liftoff_cond));
2243 }
2244 
2245 void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
2246                                          Register dst, DoubleRegister lhs,
2247                                          DoubleRegister rhs) {
2248   cdbr(lhs, rhs);
2249   EMIT_SET_CONDITION(dst, liftoff::ToCondition(liftoff_cond));
2250 }
2251 
2252 bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
2253                                    LiftoffRegister true_value,
2254                                    LiftoffRegister false_value,
2255                                    ValueKind kind) {
2256   return false;
2257 }
2258 
2259 void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
2260                                       SmiCheckMode mode) {
2261   TestIfSmi(obj);
2262   Condition condition = mode == kJumpOnSmi ? eq : ne;
2263   b(condition, target);  // branch on Smi or non-Smi, depending on {mode}
2264 }
2265 
2266 #define SIMD_BINOP_RR_LIST(V)                        \
2267   V(f64x2_add, F64x2Add)                             \
2268   V(f64x2_sub, F64x2Sub)                             \
2269   V(f64x2_mul, F64x2Mul)                             \
2270   V(f64x2_div, F64x2Div)                             \
2271   V(f64x2_min, F64x2Min)                             \
2272   V(f64x2_max, F64x2Max)                             \
2273   V(f64x2_eq, F64x2Eq)                               \
2274   V(f64x2_ne, F64x2Ne)                               \
2275   V(f64x2_lt, F64x2Lt)                               \
2276   V(f64x2_le, F64x2Le)                               \
2277   V(f64x2_pmin, F64x2Pmin)                           \
2278   V(f64x2_pmax, F64x2Pmax)                           \
2279   V(f32x4_add, F32x4Add)                             \
2280   V(f32x4_sub, F32x4Sub)                             \
2281   V(f32x4_mul, F32x4Mul)                             \
2282   V(f32x4_div, F32x4Div)                             \
2283   V(f32x4_min, F32x4Min)                             \
2284   V(f32x4_max, F32x4Max)                             \
2285   V(f32x4_eq, F32x4Eq)                               \
2286   V(f32x4_ne, F32x4Ne)                               \
2287   V(f32x4_lt, F32x4Lt)                               \
2288   V(f32x4_le, F32x4Le)                               \
2289   V(f32x4_pmin, F32x4Pmin)                           \
2290   V(f32x4_pmax, F32x4Pmax)                           \
2291   V(i64x2_add, I64x2Add)                             \
2292   V(i64x2_sub, I64x2Sub)                             \
2293   V(i64x2_eq, I64x2Eq)                               \
2294   V(i64x2_ne, I64x2Ne)                               \
2295   V(i64x2_gt_s, I64x2GtS)                            \
2296   V(i64x2_ge_s, I64x2GeS)                            \
2297   V(i32x4_add, I32x4Add)                             \
2298   V(i32x4_sub, I32x4Sub)                             \
2299   V(i32x4_mul, I32x4Mul)                             \
2300   V(i32x4_eq, I32x4Eq)                               \
2301   V(i32x4_ne, I32x4Ne)                               \
2302   V(i32x4_gt_s, I32x4GtS)                            \
2303   V(i32x4_ge_s, I32x4GeS)                            \
2304   V(i32x4_gt_u, I32x4GtU)                            \
2305   V(i32x4_min_s, I32x4MinS)                          \
2306   V(i32x4_min_u, I32x4MinU)                          \
2307   V(i32x4_max_s, I32x4MaxS)                          \
2308   V(i32x4_max_u, I32x4MaxU)                          \
2309   V(i16x8_add, I16x8Add)                             \
2310   V(i16x8_sub, I16x8Sub)                             \
2311   V(i16x8_mul, I16x8Mul)                             \
2312   V(i16x8_eq, I16x8Eq)                               \
2313   V(i16x8_ne, I16x8Ne)                               \
2314   V(i16x8_gt_s, I16x8GtS)                            \
2315   V(i16x8_ge_s, I16x8GeS)                            \
2316   V(i16x8_gt_u, I16x8GtU)                            \
2317   V(i16x8_min_s, I16x8MinS)                          \
2318   V(i16x8_min_u, I16x8MinU)                          \
2319   V(i16x8_max_s, I16x8MaxS)                          \
2320   V(i16x8_max_u, I16x8MaxU)                          \
2321   V(i16x8_rounding_average_u, I16x8RoundingAverageU) \
2322   V(i8x16_add, I8x16Add)                             \
2323   V(i8x16_sub, I8x16Sub)                             \
2324   V(i8x16_eq, I8x16Eq)                               \
2325   V(i8x16_ne, I8x16Ne)                               \
2326   V(i8x16_gt_s, I8x16GtS)                            \
2327   V(i8x16_ge_s, I8x16GeS)                            \
2328   V(i8x16_gt_u, I8x16GtU)                            \
2329   V(i8x16_min_s, I8x16MinS)                          \
2330   V(i8x16_min_u, I8x16MinU)                          \
2331   V(i8x16_max_s, I8x16MaxS)                          \
2332   V(i8x16_max_u, I8x16MaxU)                          \
2333   V(i8x16_rounding_average_u, I8x16RoundingAverageU) \
2334   V(s128_and, S128And)                               \
2335   V(s128_or, S128Or)                                 \
2336   V(s128_xor, S128Xor)                               \
2337   V(s128_and_not, S128AndNot)
2338 
2339 #define EMIT_SIMD_BINOP_RR(name, op)                                           \
2340   void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2341                                      LiftoffRegister rhs) {                    \
2342     op(dst.fp(), lhs.fp(), rhs.fp());                                          \
2343   }
2344 SIMD_BINOP_RR_LIST(EMIT_SIMD_BINOP_RR)
2345 #undef EMIT_SIMD_BINOP_RR
2346 #undef SIMD_BINOP_RR_LIST
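// Illustration only: each list entry becomes a one-line wrapper, e.g.
//
//   void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst,
//                                         LiftoffRegister lhs,
//                                         LiftoffRegister rhs) {
//     F64x2Add(dst.fp(), lhs.fp(), rhs.fp());
//   }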
2347 
2348 #define SIMD_SHIFT_RR_LIST(V) \
2349   V(i64x2_shl, I64x2Shl)      \
2350   V(i64x2_shr_s, I64x2ShrS)   \
2351   V(i64x2_shr_u, I64x2ShrU)   \
2352   V(i32x4_shl, I32x4Shl)      \
2353   V(i32x4_shr_s, I32x4ShrS)   \
2354   V(i32x4_shr_u, I32x4ShrU)   \
2355   V(i16x8_shl, I16x8Shl)      \
2356   V(i16x8_shr_s, I16x8ShrS)   \
2357   V(i16x8_shr_u, I16x8ShrU)   \
2358   V(i8x16_shl, I8x16Shl)      \
2359   V(i8x16_shr_s, I8x16ShrS)   \
2360   V(i8x16_shr_u, I8x16ShrU)
2361 
2362 #define EMIT_SIMD_SHIFT_RR(name, op)                                           \
2363   void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2364                                      LiftoffRegister rhs) {                    \
2365     op(dst.fp(), lhs.fp(), rhs.gp(), kScratchDoubleReg);                       \
2366   }
2367 SIMD_SHIFT_RR_LIST(EMIT_SIMD_SHIFT_RR)
2368 #undef EMIT_SIMD_SHIFT_RR
2369 #undef SIMD_SHIFT_RR_LIST
2370 
2371 #define SIMD_SHIFT_RI_LIST(V) \
2372   V(i64x2_shli, I64x2Shl)     \
2373   V(i64x2_shri_s, I64x2ShrS)  \
2374   V(i64x2_shri_u, I64x2ShrU)  \
2375   V(i32x4_shli, I32x4Shl)     \
2376   V(i32x4_shri_s, I32x4ShrS)  \
2377   V(i32x4_shri_u, I32x4ShrU)  \
2378   V(i16x8_shli, I16x8Shl)     \
2379   V(i16x8_shri_s, I16x8ShrS)  \
2380   V(i16x8_shri_u, I16x8ShrU)  \
2381   V(i8x16_shli, I8x16Shl)     \
2382   V(i8x16_shri_s, I8x16ShrS)  \
2383   V(i8x16_shri_u, I8x16ShrU)
2384 
2385 #define EMIT_SIMD_SHIFT_RI(name, op)                                           \
2386   void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2387                                      int32_t rhs) {                            \
2388     op(dst.fp(), lhs.fp(), Operand(rhs), r0, kScratchDoubleReg);               \
2389   }
2390 SIMD_SHIFT_RI_LIST(EMIT_SIMD_SHIFT_RI)
2391 #undef EMIT_SIMD_SHIFT_RI
2392 #undef SIMD_SHIFT_RI_LIST
2393 
2394 #define SIMD_UNOP_LIST(V)                                              \
2395   V(f64x2_splat, F64x2Splat, fp, fp, , void)                           \
2396   V(f64x2_abs, F64x2Abs, fp, fp, , void)                               \
2397   V(f64x2_neg, F64x2Neg, fp, fp, , void)                               \
2398   V(f64x2_sqrt, F64x2Sqrt, fp, fp, , void)                             \
2399   V(f64x2_ceil, F64x2Ceil, fp, fp, true, bool)                         \
2400   V(f64x2_floor, F64x2Floor, fp, fp, true, bool)                       \
2401   V(f64x2_trunc, F64x2Trunc, fp, fp, true, bool)                       \
2402   V(f64x2_nearest_int, F64x2NearestInt, fp, fp, true, bool)            \
2403   V(f32x4_abs, F32x4Abs, fp, fp, , void)                               \
2404   V(f32x4_splat, F32x4Splat, fp, fp, , void)                           \
2405   V(f32x4_neg, F32x4Neg, fp, fp, , void)                               \
2406   V(f32x4_sqrt, F32x4Sqrt, fp, fp, , void)                             \
2407   V(f32x4_ceil, F32x4Ceil, fp, fp, true, bool)                         \
2408   V(f32x4_floor, F32x4Floor, fp, fp, true, bool)                       \
2409   V(f32x4_trunc, F32x4Trunc, fp, fp, true, bool)                       \
2410   V(f32x4_nearest_int, F32x4NearestInt, fp, fp, true, bool)            \
2411   V(i64x2_abs, I64x2Abs, fp, fp, , void)                               \
2412   V(i64x2_splat, I64x2Splat, fp, gp, , void)                           \
2413   V(i64x2_neg, I64x2Neg, fp, fp, , void)                               \
2414   V(i64x2_sconvert_i32x4_low, I64x2SConvertI32x4Low, fp, fp, , void)   \
2415   V(i64x2_sconvert_i32x4_high, I64x2SConvertI32x4High, fp, fp, , void) \
2416   V(i64x2_uconvert_i32x4_low, I64x2UConvertI32x4Low, fp, fp, , void)   \
2417   V(i64x2_uconvert_i32x4_high, I64x2UConvertI32x4High, fp, fp, , void) \
2418   V(i32x4_abs, I32x4Abs, fp, fp, , void)                               \
2419   V(i32x4_neg, I32x4Neg, fp, fp, , void)                               \
2420   V(i32x4_splat, I32x4Splat, fp, gp, , void)                           \
2421   V(i32x4_sconvert_i16x8_low, I32x4SConvertI16x8Low, fp, fp, , void)   \
2422   V(i32x4_sconvert_i16x8_high, I32x4SConvertI16x8High, fp, fp, , void) \
2423   V(i32x4_uconvert_i16x8_low, I32x4UConvertI16x8Low, fp, fp, , void)   \
2424   V(i32x4_uconvert_i16x8_high, I32x4UConvertI16x8High, fp, fp, , void) \
2425   V(i16x8_abs, I16x8Abs, fp, fp, , void)                               \
2426   V(i16x8_neg, I16x8Neg, fp, fp, , void)                               \
2427   V(i16x8_splat, I16x8Splat, fp, gp, , void)                           \
2428   V(i16x8_sconvert_i8x16_low, I16x8SConvertI8x16Low, fp, fp, , void)   \
2429   V(i16x8_sconvert_i8x16_high, I16x8SConvertI8x16High, fp, fp, , void) \
2430   V(i16x8_uconvert_i8x16_low, I16x8UConvertI8x16Low, fp, fp, , void)   \
2431   V(i16x8_uconvert_i8x16_high, I16x8UConvertI8x16High, fp, fp, , void) \
2432   V(i8x16_abs, I8x16Abs, fp, fp, , void)                               \
2433   V(i8x16_neg, I8x16Neg, fp, fp, , void)                               \
2434   V(i8x16_splat, I8x16Splat, fp, gp, , void)                           \
2435   V(s128_not, S128Not, fp, fp, , void)
2436 
2437 #define EMIT_SIMD_UNOP(name, op, dtype, stype, return_val, return_type) \
2438   return_type LiftoffAssembler::emit_##name(LiftoffRegister dst,        \
2439                                             LiftoffRegister src) {      \
2440     op(dst.dtype(), src.stype());                                       \
2441     return return_val;                                                  \
2442   }
2443 SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
2444 #undef EMIT_SIMD_UNOP
2445 #undef SIMD_UNOP_LIST
2446 
2447 #define SIMD_EXTRACT_LANE_LIST(V)                \
2448   V(f64x2_extract_lane, F64x2ExtractLane, fp)    \
2449   V(f32x4_extract_lane, F32x4ExtractLane, fp)    \
2450   V(i64x2_extract_lane, I64x2ExtractLane, gp)    \
2451   V(i32x4_extract_lane, I32x4ExtractLane, gp)    \
2452   V(i16x8_extract_lane_u, I16x8ExtractLaneU, gp) \
2453   V(i16x8_extract_lane_s, I16x8ExtractLaneS, gp) \
2454   V(i8x16_extract_lane_u, I8x16ExtractLaneU, gp) \
2455   V(i8x16_extract_lane_s, I8x16ExtractLaneS, gp)
2456 
2457 #define EMIT_SIMD_EXTRACT_LANE(name, op, dtype)                                \
2458   void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
2459                                      uint8_t imm_lane_idx) {                   \
2460     op(dst.dtype(), src.fp(), imm_lane_idx, r0);                               \
2461   }
2462 SIMD_EXTRACT_LANE_LIST(EMIT_SIMD_EXTRACT_LANE)
2463 #undef EMIT_SIMD_EXTRACT_LANE
2464 #undef SIMD_EXTRACT_LANE_LIST
2465 
2466 #define SIMD_REPLACE_LANE_LIST(V)             \
2467   V(f64x2_replace_lane, F64x2ReplaceLane, fp) \
2468   V(f32x4_replace_lane, F32x4ReplaceLane, fp) \
2469   V(i64x2_replace_lane, I64x2ReplaceLane, gp) \
2470   V(i32x4_replace_lane, I32x4ReplaceLane, gp) \
2471   V(i16x8_replace_lane, I16x8ReplaceLane, gp) \
2472   V(i8x16_replace_lane, I8x16ReplaceLane, gp)
2473 
2474 #define EMIT_SIMD_REPLACE_LANE(name, op, stype)                        \
2475   void LiftoffAssembler::emit_##name(                                  \
2476       LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
2477       uint8_t imm_lane_idx) {                                          \
2478     op(dst.fp(), src1.fp(), src2.stype(), imm_lane_idx, r0);           \
2479   }
2480 SIMD_REPLACE_LANE_LIST(EMIT_SIMD_REPLACE_LANE)
2481 #undef EMIT_SIMD_REPLACE_LANE
2482 #undef SIMD_REPLACE_LANE_LIST
2483 
2484 #define SIMD_EXT_MUL_LIST(V)                          \
2485   V(i64x2_extmul_low_i32x4_s, I64x2ExtMulLowI32x4S)   \
2486   V(i64x2_extmul_low_i32x4_u, I64x2ExtMulLowI32x4U)   \
2487   V(i64x2_extmul_high_i32x4_s, I64x2ExtMulHighI32x4S) \
2488   V(i64x2_extmul_high_i32x4_u, I64x2ExtMulHighI32x4U) \
2489   V(i32x4_extmul_low_i16x8_s, I32x4ExtMulLowI16x8S)   \
2490   V(i32x4_extmul_low_i16x8_u, I32x4ExtMulLowI16x8U)   \
2491   V(i32x4_extmul_high_i16x8_s, I32x4ExtMulHighI16x8S) \
2492   V(i32x4_extmul_high_i16x8_u, I32x4ExtMulHighI16x8U) \
2493   V(i16x8_extmul_low_i8x16_s, I16x8ExtMulLowI8x16S)   \
2494   V(i16x8_extmul_low_i8x16_u, I16x8ExtMulLowI8x16U)   \
2495   V(i16x8_extmul_high_i8x16_s, I16x8ExtMulHighI8x16S) \
2496   V(i16x8_extmul_high_i8x16_u, I16x8ExtMulHighI8x16U)
2497 
2498 #define EMIT_SIMD_EXT_MUL(name, op)                                      \
2499   void LiftoffAssembler::emit_##name(                                    \
2500       LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
2501     op(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg);               \
2502   }
2503 SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL)
2504 #undef EMIT_SIMD_EXT_MUL
2505 #undef SIMD_EXT_MUL_LIST
2506 
2507 #define SIMD_ALL_TRUE_LIST(V)    \
2508   V(i64x2_alltrue, I64x2AllTrue) \
2509   V(i32x4_alltrue, I32x4AllTrue) \
2510   V(i16x8_alltrue, I16x8AllTrue) \
2511   V(i8x16_alltrue, I8x16AllTrue)
2512 
2513 #define EMIT_SIMD_ALL_TRUE(name, op)                        \
2514   void LiftoffAssembler::emit_##name(LiftoffRegister dst,   \
2515                                      LiftoffRegister src) { \
2516     op(dst.gp(), src.fp(), r0, kScratchDoubleReg);          \
2517   }
2518 SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE)
2519 #undef EMIT_SIMD_ALL_TRUE
2520 #undef SIMD_ALL_TRUE_LIST
2521 
2522 #define SIMD_ADD_SUB_SAT_LIST(V)   \
2523   V(i16x8_add_sat_s, I16x8AddSatS) \
2524   V(i16x8_sub_sat_s, I16x8SubSatS) \
2525   V(i16x8_add_sat_u, I16x8AddSatU) \
2526   V(i16x8_sub_sat_u, I16x8SubSatU) \
2527   V(i8x16_add_sat_s, I8x16AddSatS) \
2528   V(i8x16_sub_sat_s, I8x16SubSatS) \
2529   V(i8x16_add_sat_u, I8x16AddSatU) \
2530   V(i8x16_sub_sat_u, I8x16SubSatU)
2531 
2532 #define EMIT_SIMD_ADD_SUB_SAT(name, op)                                        \
2533   void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
2534                                      LiftoffRegister rhs) {                    \
2535     Simd128Register src1 = lhs.fp();                                           \
2536     Simd128Register src2 = rhs.fp();                                           \
2537     Simd128Register dest = dst.fp();                                           \
2538     /* lhs and rhs are unique based on their selection under liftoff-compiler  \
2539      * `EmitBinOp`. */                                                         \
2540     /* Make sure dst and temp are also unique. */                              \
2541     if (dest == src1 || dest == src2) {                                        \
2542       dest = GetUnusedRegister(kFpReg, LiftoffRegList{src1, src2}).fp();       \
2543     }                                                                          \
2544     Simd128Register temp =                                                     \
2545         GetUnusedRegister(kFpReg, LiftoffRegList{dest, src1, src2}).fp();      \
2546     op(dest, src1, src2, kScratchDoubleReg, temp);                             \
2547     /* Original dst register needs to be populated. */                         \
2548     if (dest != dst.fp()) {                                                    \
2549       vlr(dst.fp(), dest, Condition(0), Condition(0), Condition(0));           \
2550     }                                                                          \
2551   }
2552 SIMD_ADD_SUB_SAT_LIST(EMIT_SIMD_ADD_SUB_SAT)
2553 #undef EMIT_SIMD_ADD_SUB_SAT
2554 #undef SIMD_ADD_SUB_SAT_LIST
2555 
2556 #define SIMD_EXT_ADD_PAIRWISE_LIST(V)                         \
2557   V(i32x4_extadd_pairwise_i16x8_s, I32x4ExtAddPairwiseI16x8S) \
2558   V(i32x4_extadd_pairwise_i16x8_u, I32x4ExtAddPairwiseI16x8U) \
2559   V(i16x8_extadd_pairwise_i8x16_s, I16x8ExtAddPairwiseI8x16S) \
2560   V(i16x8_extadd_pairwise_i8x16_u, I16x8ExtAddPairwiseI8x16U)
2561 
2562 #define EMIT_SIMD_EXT_ADD_PAIRWISE(name, op)                         \
2563   void LiftoffAssembler::emit_##name(LiftoffRegister dst,            \
2564                                      LiftoffRegister src) {          \
2565     Simd128Register src1 = src.fp();                                 \
2566     Simd128Register dest = dst.fp();                                 \
2567     /* Make sure dst and temp are unique. */                         \
2568     if (dest == src1) {                                              \
2569       dest = GetUnusedRegister(kFpReg, LiftoffRegList{src1}).fp();   \
2570     }                                                                \
2571     Simd128Register temp =                                           \
2572         GetUnusedRegister(kFpReg, LiftoffRegList{dest, src1}).fp();  \
2573     op(dest, src1, kScratchDoubleReg, temp);                         \
2574     if (dest != dst.fp()) {                                          \
2575       vlr(dst.fp(), dest, Condition(0), Condition(0), Condition(0)); \
2576     }                                                                \
2577   }
2578 SIMD_EXT_ADD_PAIRWISE_LIST(EMIT_SIMD_EXT_ADD_PAIRWISE)
2579 #undef EMIT_SIMD_EXT_ADD_PAIRWISE
2580 #undef SIMD_EXT_ADD_PAIRWISE_LIST
2581 
2582 void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
2583                                      Register offset_reg, uintptr_t offset_imm,
2584                                      LoadType type,
2585                                      LoadTransformationKind transform,
2586                                      uint32_t* protected_load_pc) {
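  // A 20-bit signed displacement is the limit for an s390 MemOperand; for
  // larger offsets, materialize the offset in {ip} and use it as the index
  // register instead.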
2587   if (!is_int20(offset_imm)) {
2588     mov(ip, Operand(offset_imm));
2589     if (offset_reg != no_reg) {
2590       AddS64(ip, offset_reg);
2591     }
2592     offset_reg = ip;
2593     offset_imm = 0;
2594   }
2595   MemOperand src_op =
2596       MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
2597   *protected_load_pc = pc_offset();
2598   MachineType memtype = type.mem_type();
2599   if (transform == LoadTransformationKind::kExtend) {
2600     if (memtype == MachineType::Int8()) {
2601       LoadAndExtend8x8SLE(dst.fp(), src_op, r1);
2602     } else if (memtype == MachineType::Uint8()) {
2603       LoadAndExtend8x8ULE(dst.fp(), src_op, r1);
2604     } else if (memtype == MachineType::Int16()) {
2605       LoadAndExtend16x4SLE(dst.fp(), src_op, r1);
2606     } else if (memtype == MachineType::Uint16()) {
2607       LoadAndExtend16x4ULE(dst.fp(), src_op, r1);
2608     } else if (memtype == MachineType::Int32()) {
2609       LoadAndExtend32x2SLE(dst.fp(), src_op, r1);
2610     } else if (memtype == MachineType::Uint32()) {
2611       LoadAndExtend32x2ULE(dst.fp(), src_op, r1);
2612     }
2613   } else if (transform == LoadTransformationKind::kZeroExtend) {
2614     if (memtype == MachineType::Int32()) {
2615       LoadV32ZeroLE(dst.fp(), src_op, r1);
2616     } else {
2617       DCHECK_EQ(MachineType::Int64(), memtype);
2618       LoadV64ZeroLE(dst.fp(), src_op, r1);
2619     }
2620   } else {
2621     DCHECK_EQ(LoadTransformationKind::kSplat, transform);
2622     if (memtype == MachineType::Int8()) {
2623       LoadAndSplat8x16LE(dst.fp(), src_op, r1);
2624     } else if (memtype == MachineType::Int16()) {
2625       LoadAndSplat16x8LE(dst.fp(), src_op, r1);
2626     } else if (memtype == MachineType::Int32()) {
2627       LoadAndSplat32x4LE(dst.fp(), src_op, r1);
2628     } else if (memtype == MachineType::Int64()) {
2629       LoadAndSplat64x2LE(dst.fp(), src_op, r1);
2630     }
2631   }
2632 }
2633 
2634 void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
2635                                 Register addr, Register offset_reg,
2636                                 uintptr_t offset_imm, LoadType type,
2637                                 uint8_t laneidx, uint32_t* protected_load_pc) {
2638   if (!is_int20(offset_imm)) {
2639     mov(ip, Operand(offset_imm));
2640     if (offset_reg != no_reg) {
2641       AddS64(ip, offset_reg);
2642     }
2643     offset_reg = ip;
2644     offset_imm = 0;
2645   }
2646   MemOperand src_op =
2647       MemOperand(addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
2648 
2649   MachineType mem_type = type.mem_type();
2650   if (dst != src) {
2651     vlr(dst.fp(), src.fp(), Condition(0), Condition(0), Condition(0));
2652   }
2653 
2654   if (protected_load_pc) *protected_load_pc = pc_offset();
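  // Lane indices are mirrored (15 - laneidx, 7 - laneidx, ...) because
  // vector elements are numbered from the most significant end on s390,
  // while wasm lane 0 is the least significant.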
2655   if (mem_type == MachineType::Int8()) {
2656     LoadLane8LE(dst.fp(), src_op, 15 - laneidx, r1);
2657   } else if (mem_type == MachineType::Int16()) {
2658     LoadLane16LE(dst.fp(), src_op, 7 - laneidx, r1);
2659   } else if (mem_type == MachineType::Int32()) {
2660     LoadLane32LE(dst.fp(), src_op, 3 - laneidx, r1);
2661   } else {
2662     DCHECK_EQ(MachineType::Int64(), mem_type);
2663     LoadLane64LE(dst.fp(), src_op, 1 - laneidx, r1);
2664   }
2665 }
2666 
2667 void LiftoffAssembler::StoreLane(Register dst, Register offset,
2668                                  uintptr_t offset_imm, LiftoffRegister src,
2669                                  StoreType type, uint8_t lane,
2670                                  uint32_t* protected_store_pc) {
2671   if (!is_int20(offset_imm)) {
2672     mov(ip, Operand(offset_imm));
2673     if (offset != no_reg) {
2674       AddS64(ip, offset);
2675     }
2676     offset = ip;
2677     offset_imm = 0;
2678   }
2679   MemOperand dst_op =
2680       MemOperand(dst, offset == no_reg ? r0 : offset, offset_imm);
2681 
2682   if (protected_store_pc) *protected_store_pc = pc_offset();
2683 
2684   MachineRepresentation rep = type.mem_rep();
2685   if (rep == MachineRepresentation::kWord8) {
2686     StoreLane8LE(src.fp(), dst_op, 15 - lane, r1);
2687   } else if (rep == MachineRepresentation::kWord16) {
2688     StoreLane16LE(src.fp(), dst_op, 7 - lane, r1);
2689   } else if (rep == MachineRepresentation::kWord32) {
2690     StoreLane32LE(src.fp(), dst_op, 3 - lane, r1);
2691   } else {
2692     DCHECK_EQ(MachineRepresentation::kWord64, rep);
2693     StoreLane64LE(src.fp(), dst_op, 1 - lane, r1);
2694   }
2695 }
2696 
2697 void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
2698                                       LiftoffRegister rhs) {
2699   I64x2Mul(dst.fp(), lhs.fp(), rhs.fp(), r0, r1, ip);
2700 }
2701 
2702 void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
2703                                        LiftoffRegister rhs) {
2704   I32x4GeU(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
2705 }
2706 
2707 void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
2708                                        LiftoffRegister rhs) {
2709   I16x8GeU(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
2710 }
2711 
2712 void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
2713                                        LiftoffRegister rhs) {
2714   I8x16GeU(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
2715 }
2716 
2717 void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
2718                                           LiftoffRegister lhs,
2719                                           LiftoffRegister rhs) {
2720   Simd128Register src1 = lhs.fp();
2721   Simd128Register src2 = rhs.fp();
2722   Simd128Register dest = dst.fp();
2723   Simd128Register temp =
2724       GetUnusedRegister(kFpReg, LiftoffRegList{dest, src1, src2}).fp();
2725   I8x16Swizzle(dest, src1, src2, r0, r1, kScratchDoubleReg, temp);
2726 }
2727 
2728 void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
2729                                                       LiftoffRegister src) {
2730   F64x2ConvertLowI32x4S(dst.fp(), src.fp());
2731 }
2732 
2733 void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
2734                                                       LiftoffRegister src) {
2735   F64x2ConvertLowI32x4U(dst.fp(), src.fp());
2736 }
2737 
2738 void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
2739                                                     LiftoffRegister src) {
2740   F64x2PromoteLowF32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0, r1, ip);
2741 }
2742 
2743 void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
2744                                           LiftoffRegister src) {
2745   I64x2BitMask(dst.gp(), src.fp(), r0, kScratchDoubleReg);
2746 }
2747 
2748 void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
2749                                           LiftoffRegister src) {
2750   I32x4BitMask(dst.gp(), src.fp(), r0, kScratchDoubleReg);
2751 }
2752 
2753 void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
2754                                               LiftoffRegister lhs,
2755                                               LiftoffRegister rhs) {
2756   I32x4DotI16x8S(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
2757 }
2758 
2759 void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
2760                                           LiftoffRegister src) {
2761   I16x8BitMask(dst.gp(), src.fp(), r0, kScratchDoubleReg);
2762 }
2763 
2764 void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
2765                                                 LiftoffRegister src1,
2766                                                 LiftoffRegister src2) {
2767   Simd128Register s1 = src1.fp();
2768   Simd128Register s2 = src2.fp();
2769   Simd128Register dest = dst.fp();
2770   // Make sure temp registers are unique.
2771   Simd128Register temp1 =
2772       GetUnusedRegister(kFpReg, LiftoffRegList{dest, s1, s2}).fp();
2773   Simd128Register temp2 =
2774       GetUnusedRegister(kFpReg, LiftoffRegList{dest, s1, s2, temp1}).fp();
2775   I16x8Q15MulRSatS(dest, s1, s2, kScratchDoubleReg, temp1, temp2);
2776 }
2777 
2778 void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
2779                                           LiftoffRegister lhs,
2780                                           LiftoffRegister rhs,
2781                                           const uint8_t shuffle[16],
2782                                           bool is_swizzle) {
2783   // Remap the shuffle indices to match IBM lane numbering.
2784   // TODO(miladfarca): Put this in a function and share it with the instruction
2785   // selector.
2786   int max_index = 15;
2787   int total_lane_count = 2 * kSimd128Size;
2788   uint8_t shuffle_remapped[kSimd128Size];
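  // Indices 0..15 (first input) map to 15 - i, and indices 16..31 (second
  // input) map to 47 - i; e.g. 0 -> 15 and 16 -> 31.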
2789   for (int i = 0; i < kSimd128Size; i++) {
2790     uint8_t current_index = shuffle[i];
2791     shuffle_remapped[i] = (current_index <= max_index
2792                                ? max_index - current_index
2793                                : total_lane_count - current_index + max_index);
2794   }
2795   uint64_t vals[2];
2796   memcpy(vals, shuffle_remapped, sizeof(shuffle_remapped));
2797 #ifdef V8_TARGET_BIG_ENDIAN
2798   vals[0] = ByteReverse(vals[0]);
2799   vals[1] = ByteReverse(vals[1]);
2800 #endif
2801   I8x16Shuffle(dst.fp(), lhs.fp(), rhs.fp(), vals[1], vals[0], r0, ip,
2802                kScratchDoubleReg);
2803 }
2804 
2805 void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
2806                                          LiftoffRegister src) {
2807   I8x16Popcnt(dst.fp(), src.fp());
2808 }
2809 
2810 void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
2811                                          LiftoffRegister src) {
2812   V128AnyTrue(dst.gp(), src.fp(), r0);
2813 }
2814 
2815 void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
2816                                           LiftoffRegister src) {
2817   I8x16BitMask(dst.gp(), src.fp(), r0, ip, kScratchDoubleReg);
2818 }
2819 
2820 void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
2821                                        const uint8_t imms[16]) {
2822   uint64_t vals[2];
2823   memcpy(vals, imms, sizeof(vals));
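  // The 16 immediate bytes are packed into two 64-bit halves; on big-endian
  // targets each half is byte-reversed before being materialized below.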
2824 #ifdef V8_TARGET_BIG_ENDIAN
2825   vals[0] = ByteReverse(vals[0]);
2826   vals[1] = ByteReverse(vals[1]);
2827 #endif
2828   S128Const(dst.fp(), vals[1], vals[0], r0, ip);
2829 }
2830 
2831 void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
2832                                         LiftoffRegister src1,
2833                                         LiftoffRegister src2,
2834                                         LiftoffRegister mask) {
2835   S128Select(dst.fp(), src1.fp(), src2.fp(), mask.fp());
2836 }
2837 
2838 void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
2839                                                  LiftoffRegister src) {
2840   I32x4SConvertF32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0);
2841 }
2842 
2843 void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
2844                                                  LiftoffRegister src) {
2845   I32x4UConvertF32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0);
2846 }
2847 
2848 void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
2849                                                  LiftoffRegister src) {
2850   F32x4SConvertI32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0);
2851 }
2852 
2853 void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
2854                                                  LiftoffRegister src) {
2855   F32x4UConvertI32x4(dst.fp(), src.fp(), kScratchDoubleReg, r0);
2856 }
2857 
2858 void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
2859                                                     LiftoffRegister src) {
2860   F32x4DemoteF64x2Zero(dst.fp(), src.fp(), kScratchDoubleReg, r0, r1, ip);
2861 }
2862 
2863 void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
2864                                                  LiftoffRegister lhs,
2865                                                  LiftoffRegister rhs) {
2866   I8x16SConvertI16x8(dst.fp(), lhs.fp(), rhs.fp());
2867 }
2868 
2869 void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
2870                                                  LiftoffRegister lhs,
2871                                                  LiftoffRegister rhs) {
2872   I8x16UConvertI16x8(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
2873 }
2874 
2875 void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
2876                                                  LiftoffRegister lhs,
2877                                                  LiftoffRegister rhs) {
2878   I16x8SConvertI32x4(dst.fp(), lhs.fp(), rhs.fp());
2879 }
2880 
2881 void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
2882                                                  LiftoffRegister lhs,
2883                                                  LiftoffRegister rhs) {
2884   I16x8UConvertI32x4(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
2885 }
2886 
2887 void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
2888                                                          LiftoffRegister src) {
2889   I32x4TruncSatF64x2SZero(dst.fp(), src.fp(), kScratchDoubleReg);
2890 }
2891 
2892 void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
2893                                                          LiftoffRegister src) {
2894   I32x4TruncSatF64x2UZero(dst.fp(), src.fp(), kScratchDoubleReg);
2895 }
2896 
2897 void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
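  // Load the current stack limit and take the out-of-line path if the stack
  // pointer has reached or crossed it.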
2898   LoadU64(limit_address, MemOperand(limit_address));
2899   CmpU64(sp, limit_address);
2900   b(le, ool_code);
2901 }
2902 
2903 void LiftoffAssembler::CallTrapCallbackForTesting() {
2904   PrepareCallCFunction(0, 0, no_reg);
2905   CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
2906 }
2907 
2908 void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
2909   // Assert that this point in the wasm code is never reached.
2910   TurboAssembler::AssertUnreachable(reason);
2911 }
2912 
2913 void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
2914   MultiPush(regs.GetGpList());
2915   MultiPushF64OrV128(regs.GetFpList(), ip);
2916 }
2917 
2918 void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
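  // Restore in the reverse order of PushRegisters: FP/SIMD registers first,
  // then the general-purpose registers.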
2919   MultiPopF64OrV128(regs.GetFpList(), ip);
2920   MultiPop(regs.GetGpList());
2921 }
2922 
2923 void LiftoffAssembler::RecordSpillsInSafepoint(
2924     SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills,
2925     LiftoffRegList ref_spills, int spill_offset) {
2926   int spill_space_size = 0;
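  // Walk all spilled registers; only slots holding tagged references are
  // recorded in the safepoint so the GC can visit them.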
2927   while (!all_spills.is_empty()) {
2928     LiftoffRegister reg = all_spills.GetLastRegSet();
2929     if (ref_spills.has(reg)) {
2930       safepoint.DefineTaggedStackSlot(spill_offset);
2931     }
2932     all_spills.clear(reg);
2933     ++spill_offset;
2934     spill_space_size += kSystemPointerSize;
2935   }
2936   // Record the number of additional spill slots.
2937   RecordOolSpillSpaceSize(spill_space_size);
2938 }
2939 
2940 void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
2941   Drop(num_stack_slots);
2942   Ret();
2943 }
2944 
2945 void LiftoffAssembler::CallC(const ValueKindSig* sig,
2946                              const LiftoffRegister* args,
2947                              const LiftoffRegister* rets,
2948                              ValueKind out_argument_kind, int stack_bytes,
2949                              ExternalReference ext_ref) {
2950   int total_size = RoundUp(stack_bytes, 8);
2951 
2952   int size = total_size;
2953   constexpr int kStackPageSize = 4 * KB;
2954 
2955   // Reserve space in the stack.
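  // For large buffers, lower sp one page at a time and touch each page with a
  // store so the reservation cannot skip past the stack guard page.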
2956   while (size > kStackPageSize) {
2957     lay(sp, MemOperand(sp, -kStackPageSize));
2958     StoreU64(r0, MemOperand(sp));
2959     size -= kStackPageSize;
2960   }
2961 
2962   lay(sp, MemOperand(sp, -size));
2963 
2964   int arg_bytes = 0;
2965   for (ValueKind param_kind : sig->parameters()) {
2966     switch (param_kind) {
2967       case kI32:
2968         StoreU32(args->gp(), MemOperand(sp, arg_bytes));
2969         break;
2970       case kI64:
2971         StoreU64(args->gp(), MemOperand(sp, arg_bytes));
2972         break;
2973       case kF32:
2974         StoreF32(args->fp(), MemOperand(sp, arg_bytes));
2975         break;
2976       case kF64:
2977         StoreF64(args->fp(), MemOperand(sp, arg_bytes));
2978         break;
2979       default:
2980         UNREACHABLE();
2981     }
2982     args++;
2983     arg_bytes += value_kind_size(param_kind);
2984   }
2985 
2986   DCHECK_LE(arg_bytes, stack_bytes);
2987 
2988   // Pass a pointer to the buffer with the arguments to the C function.
2989   mov(r2, sp);
2990 
2991   // Now call the C function.
2992   constexpr int kNumCCallArgs = 1;
2993   PrepareCallCFunction(kNumCCallArgs, no_reg);
2994   CallCFunction(ext_ref, kNumCCallArgs);
2995 
2996   // Move return value to the right register.
2997   const LiftoffRegister* result_reg = rets;
2998   if (sig->return_count() > 0) {
2999     DCHECK_EQ(1, sig->return_count());
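    // In the s390x C calling convention the integer return value arrives in r2.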
3000     constexpr Register kReturnReg = r2;
3001     if (kReturnReg != rets->gp()) {
3002       Move(*rets, LiftoffRegister(kReturnReg), sig->GetReturn(0));
3003     }
3004     result_reg++;
3005   }
3006 
3007   // Load potential output value from the buffer on the stack.
3008   if (out_argument_kind != kVoid) {
3009     switch (out_argument_kind) {
3010       case kI32:
3011         LoadS32(result_reg->gp(), MemOperand(sp));
3012         break;
3013       case kI64:
3014       case kOptRef:
3015       case kRef:
3016       case kRtt:
3017         LoadU64(result_reg->gp(), MemOperand(sp));
3018         break;
3019       case kF32:
3020         LoadF32(result_reg->fp(), MemOperand(sp));
3021         break;
3022       case kF64:
3023         LoadF64(result_reg->fp(), MemOperand(sp));
3024         break;
3025       default:
3026         UNREACHABLE();
3027     }
3028   }
3029   lay(sp, MemOperand(sp, total_size));
3030 }
3031 
3032 void LiftoffAssembler::CallNativeWasmCode(Address addr) {
3033   Call(addr, RelocInfo::WASM_CALL);
3034 }
3035 
3036 void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
3037   Jump(addr, RelocInfo::WASM_CALL);
3038 }
3039 
3040 void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
3041                                     compiler::CallDescriptor* call_descriptor,
3042                                     Register target) {
3043   DCHECK(target != no_reg);
3044   Call(target);
3045 }
3046 
3047 void LiftoffAssembler::TailCallIndirect(Register target) {
3048   DCHECK(target != no_reg);
3049   Jump(target);
3050 }
3051 
3052 void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
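  // The stub id is encoded as the call target; the WASM_STUB_CALL relocation
  // lets it be resolved to the real stub address when the code is finalized.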
3053   Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
3054 }
3055 
3056 void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
3057   lay(sp, MemOperand(sp, -size));
3058   TurboAssembler::Move(addr, sp);
3059 }
3060 
3061 void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
3062   lay(sp, MemOperand(sp, size));
3063 }
3064 
3065 void LiftoffAssembler::MaybeOSR() {}
3066 
3067 void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
3068                                        ValueKind kind) {
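  // A self-comparison is unordered only if src is NaN; in that case the NaN
  // bits are stored to *dst as a non-zero marker.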
3069   Label return_nan, done;
3070   if (kind == kF32) {
3071     cebr(src, src);
3072     bunordered(&return_nan);
3073   } else {
3074     DCHECK_EQ(kind, kF64);
3075     cdbr(src, src);
3076     bunordered(&return_nan);
3077   }
3078   b(&done);
3079   bind(&return_nan);
3080   StoreF32LE(src, MemOperand(dst), r0);
3081   bind(&done);
3082 }
3083 
3084 void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
3085                                             Register tmp_gp,
3086                                             LiftoffRegister tmp_s128,
3087                                             ValueKind lane_kind) {
3088   Label return_nan, done;
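  // Compare each lane of src with itself; NaN lanes compare unequal, and the
  // resulting condition code is used to detect them.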
3089   if (lane_kind == kF32) {
3090     vfce(tmp_s128.fp(), src.fp(), src.fp(), Condition(1), Condition(0),
3091          Condition(2));
3092     b(Condition(0x5), &return_nan);  // If any or all are NaN.
3093   } else {
3094     DCHECK_EQ(lane_kind, kF64);
3095     vfce(tmp_s128.fp(), src.fp(), src.fp(), Condition(1), Condition(0),
3096          Condition(3));
3097     b(Condition(0x5), &return_nan);
3098   }
3099   b(&done);
3100   bind(&return_nan);
3101   StoreF32LE(src.fp(), MemOperand(dst), r0);
3102   bind(&done);
3103 }
3104 
3105 void LiftoffStackSlots::Construct(int param_slots) {
3106   DCHECK_LT(0, slots_.size());
3107   SortInPushOrder();
3108   int last_stack_slot = param_slots;
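  // Slots are visited in push order, from the highest parameter slot downward;
  // any alignment gap to the next slot is reserved before the value is pushed.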
3109   for (auto& slot : slots_) {
3110     const int stack_slot = slot.dst_slot_;
3111     int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
3112     DCHECK_LT(0, stack_decrement);
3113     last_stack_slot = stack_slot;
3114     const LiftoffAssembler::VarState& src = slot.src_;
3115     switch (src.loc()) {
3116       case LiftoffAssembler::VarState::kStack: {
3117         switch (src.kind()) {
3118           case kI32:
3119           case kRef:
3120           case kOptRef:
3121           case kRtt:
3122           case kI64: {
3123             asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
3124             UseScratchRegisterScope temps(asm_);
3125             Register scratch = temps.Acquire();
3126             asm_->LoadU64(scratch, liftoff::GetStackSlot(slot.src_offset_));
3127             asm_->Push(scratch);
3128             break;
3129           }
3130           case kF32: {
3131             asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
3132             asm_->LoadF32(kScratchDoubleReg,
3133                           liftoff::GetStackSlot(slot.src_offset_));
3134             asm_->lay(sp, MemOperand(sp, -kSystemPointerSize));
3135             asm_->StoreF32(kScratchDoubleReg, MemOperand(sp));
3136             break;
3137           }
3138           case kF64: {
3139             asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
3140             asm_->LoadF64(kScratchDoubleReg,
3141                           liftoff::GetStackSlot(slot.src_offset_));
3142             asm_->push(kScratchDoubleReg);
3143             break;
3144           }
3145           case kS128: {
3146             asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
3147             UseScratchRegisterScope temps(asm_);
3148             Register scratch = temps.Acquire();
3149             asm_->LoadV128(kScratchDoubleReg,
3150                            liftoff::GetStackSlot(slot.src_offset_), scratch);
3151             asm_->lay(sp, MemOperand(sp, -kSimd128Size));
3152             asm_->StoreV128(kScratchDoubleReg, MemOperand(sp), scratch);
3153             break;
3154           }
3155           default:
3156             UNREACHABLE();
3157         }
3158         break;
3159       }
3160       case LiftoffAssembler::VarState::kRegister: {
3161         int pushed_bytes = SlotSizeInBytes(slot);
3162         asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
3163         switch (src.kind()) {
3164           case kI64:
3165           case kI32:
3166           case kRef:
3167           case kOptRef:
3168           case kRtt:
3169             asm_->push(src.reg().gp());
3170             break;
3171           case kF32:
3172             asm_->lay(sp, MemOperand(sp, -kSystemPointerSize));
3173             asm_->StoreF32(src.reg().fp(), MemOperand(sp));
3174             break;
3175           case kF64:
3176             asm_->push(src.reg().fp());
3177             break;
3178           case kS128: {
3179             UseScratchRegisterScope temps(asm_);
3180             Register scratch = temps.Acquire();
3181             asm_->lay(sp, MemOperand(sp, -kSimd128Size));
3182             asm_->StoreV128(src.reg().fp(), MemOperand(sp), scratch);
3183             break;
3184           }
3185           default:
3186             UNREACHABLE();
3187         }
3188         break;
3189       }
3190       case LiftoffAssembler::VarState::kIntConst: {
3191         asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
3192         DCHECK(src.kind() == kI32 || src.kind() == kI64);
3193         UseScratchRegisterScope temps(asm_);
3194         Register scratch = temps.Acquire();
3195 
3196         switch (src.kind()) {
3197           case kI32:
3198             asm_->mov(scratch, Operand(src.i32_const()));
3199             break;
3200           case kI64:
3201             asm_->mov(scratch, Operand(int64_t{slot.src_.i32_const()}));
3202             break;
3203           default:
3204             UNREACHABLE();
3205         }
3206         asm_->push(scratch);
3207         break;
3208       }
3209     }
3210   }
3211 }
3212 
3213 }  // namespace wasm
3214 }  // namespace internal
3215 }  // namespace v8
3216 
3217 #undef BAILOUT
3218 
3219 #endif  // V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_H_
3220