// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_

#include <iosfwd>
#include <memory>

#include "src/base/bits.h"
#include "src/frames.h"
#include "src/macro-assembler.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-value.h"

namespace v8 {
namespace internal {

// Forward declarations.
namespace compiler {
class CallDescriptor;
}

namespace wasm {

class LiftoffAssembler : public TurboAssembler {
 public:
  // Each slot in our stack frame currently has exactly 8 bytes.
  static constexpr uint32_t kStackSlotSize = 8;

  static constexpr ValueType kWasmIntPtr =
      kPointerSize == 8 ? kWasmI64 : kWasmI32;

  class VarState {
   public:
    enum Location : uint8_t { kStack, kRegister, KIntConst };

    explicit VarState(ValueType type) : loc_(kStack), type_(type) {}
    explicit VarState(ValueType type, LiftoffRegister r)
        : loc_(kRegister), type_(type), reg_(r) {
      DCHECK_EQ(r.reg_class(), reg_class_for(type));
    }
    explicit VarState(ValueType type, int32_t i32_const)
        : loc_(KIntConst), type_(type), i32_const_(i32_const) {
      DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
    }

    bool operator==(const VarState& other) const {
      if (loc_ != other.loc_) return false;
      if (type_ != other.type_) return false;
      switch (loc_) {
        case kStack:
          return true;
        case kRegister:
          return reg_ == other.reg_;
        case KIntConst:
          return i32_const_ == other.i32_const_;
      }
      UNREACHABLE();
    }

    bool is_stack() const { return loc_ == kStack; }
    bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
    bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
    bool is_reg() const { return loc_ == kRegister; }
    bool is_const() const { return loc_ == KIntConst; }

    ValueType type() const { return type_; }

    Location loc() const { return loc_; }

    int32_t i32_const() const {
      DCHECK_EQ(loc_, KIntConst);
      return i32_const_;
    }
    WasmValue constant() const {
      DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
      DCHECK_EQ(loc_, KIntConst);
      return type_ == kWasmI32 ? WasmValue(i32_const_)
                               : WasmValue(int64_t{i32_const_});
    }

    Register gp_reg() const { return reg().gp(); }
    DoubleRegister fp_reg() const { return reg().fp(); }
    LiftoffRegister reg() const {
      DCHECK_EQ(loc_, kRegister);
      return reg_;
    }
    RegClass reg_class() const { return reg().reg_class(); }

    void MakeStack() { loc_ = kStack; }

   private:
    Location loc_;
    // TODO(wasm): This is redundant, the decoder already knows the type of each
    // stack value. Try to collapse.
    ValueType type_;

    union {
      LiftoffRegister reg_;  // used if loc_ == kRegister
      int32_t i32_const_;    // used if loc_ == KIntConst
    };
  };

  ASSERT_TRIVIALLY_COPYABLE(VarState);

  struct CacheState {
    // Allow default construction, move construction, and move assignment.
    CacheState() = default;
    CacheState(CacheState&&) = default;
    CacheState& operator=(CacheState&&) = default;

    // TODO(clemensh): Improve memory management here; avoid std::vector.
    std::vector<VarState> stack_state;
    LiftoffRegList used_registers;
    uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
    LiftoffRegList last_spilled_regs;
    // TODO(clemensh): Remove stack_base; use ControlBase::stack_depth.
    uint32_t stack_base = 0;

    bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
      if (kNeedI64RegPair && rc == kGpRegPair) {
        LiftoffRegList available_regs =
            kGpCacheRegList & ~used_registers & ~pinned;
        return available_regs.GetNumRegsSet() >= 2;
      }
      DCHECK(rc == kGpReg || rc == kFpReg);
      LiftoffRegList candidates = GetCacheRegList(rc);
      return has_unused_register(candidates, pinned);
    }

    bool has_unused_register(LiftoffRegList candidates,
                             LiftoffRegList pinned = {}) const {
      LiftoffRegList available_regs = candidates & ~used_registers & ~pinned;
      return !available_regs.is_empty();
    }

    LiftoffRegister unused_register(RegClass rc,
                                    LiftoffRegList pinned = {}) const {
      if (kNeedI64RegPair && rc == kGpRegPair) {
        Register low = pinned.set(unused_register(kGpReg, pinned)).gp();
        Register high = unused_register(kGpReg, pinned).gp();
        return LiftoffRegister::ForPair(low, high);
      }
      DCHECK(rc == kGpReg || rc == kFpReg);
      LiftoffRegList candidates = GetCacheRegList(rc);
      return unused_register(candidates, pinned);
    }

    LiftoffRegister unused_register(LiftoffRegList candidates,
                                    LiftoffRegList pinned = {}) const {
      LiftoffRegList available_regs = candidates & ~used_registers & ~pinned;
      return available_regs.GetFirstRegSet();
    }

    void inc_used(LiftoffRegister reg) {
      if (reg.is_pair()) {
        inc_used(reg.low());
        inc_used(reg.high());
        return;
      }
      used_registers.set(reg);
      DCHECK_GT(kMaxInt, register_use_count[reg.liftoff_code()]);
      ++register_use_count[reg.liftoff_code()];
    }

    // Decrement the use count of {reg}; on its last use, remove it from
    // {used_registers}.
    void dec_used(LiftoffRegister reg) {
      DCHECK(is_used(reg));
      if (reg.is_pair()) {
        dec_used(reg.low());
        dec_used(reg.high());
        return;
      }
      int code = reg.liftoff_code();
      DCHECK_LT(0, register_use_count[code]);
      if (--register_use_count[code] == 0) used_registers.clear(reg);
    }

    bool is_used(LiftoffRegister reg) const {
      if (reg.is_pair()) return is_used(reg.low()) || is_used(reg.high());
      bool used = used_registers.has(reg);
      DCHECK_EQ(used, register_use_count[reg.liftoff_code()] != 0);
      return used;
    }

    uint32_t get_use_count(LiftoffRegister reg) const {
      if (reg.is_pair()) {
        DCHECK_EQ(register_use_count[reg.low().liftoff_code()],
                  register_use_count[reg.high().liftoff_code()]);
        reg = reg.low();
      }
      DCHECK_GT(arraysize(register_use_count), reg.liftoff_code());
      return register_use_count[reg.liftoff_code()];
    }

    void clear_used(LiftoffRegister reg) {
      register_use_count[reg.liftoff_code()] = 0;
      used_registers.clear(reg);
    }

    bool is_free(LiftoffRegister reg) const { return !is_used(reg); }

    void reset_used_registers() {
      used_registers = {};
      memset(register_use_count, 0, sizeof(register_use_count));
    }

    LiftoffRegister GetNextSpillReg(LiftoffRegList candidates,
                                    LiftoffRegList pinned = {}) {
      LiftoffRegList unpinned = candidates.MaskOut(pinned);
      DCHECK(!unpinned.is_empty());
      // This method should only be called if none of the candidates is free.
      DCHECK(unpinned.MaskOut(used_registers).is_empty());
      LiftoffRegList unspilled = unpinned.MaskOut(last_spilled_regs);
      if (unspilled.is_empty()) {
        unspilled = unpinned;
        last_spilled_regs = {};
      }
      LiftoffRegister reg = unspilled.GetFirstRegSet();
      last_spilled_regs.set(reg);
      return reg;
    }
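    // Illustrative example of the rotation above: with candidates {r0, r1}
    // both in use, successive calls return r0, then r1, then reset
    // {last_spilled_regs} and hand out r0 again (assuming r0 has the lower
    // liftoff register code).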

    // TODO(clemensh): Don't copy the full parent state (this makes us N^2).
    void InitMerge(const CacheState& source, uint32_t num_locals,
                   uint32_t arity);

    void Steal(CacheState& source);

    void Split(const CacheState& source);

    uint32_t stack_height() const {
      return static_cast<uint32_t>(stack_state.size());
    }

   private:
    // Make the copy assignment operator private (to be used from {Split()}).
    CacheState& operator=(const CacheState&) = default;
    // Disallow copy construction.
    CacheState(const CacheState&) = delete;
  };

  LiftoffAssembler();
  ~LiftoffAssembler();

  LiftoffRegister PopToRegister(LiftoffRegList pinned = {});

  void PushRegister(ValueType type, LiftoffRegister reg) {
    DCHECK_EQ(reg_class_for(type), reg.reg_class());
    cache_state_.inc_used(reg);
    cache_state_.stack_state.emplace_back(type, reg);
  }

  void SpillRegister(LiftoffRegister);

  uint32_t GetNumUses(LiftoffRegister reg) {
    return cache_state_.get_use_count(reg);
  }

  // Get an unused register for class {rc}, reusing one of {try_first} if
  // possible.
  LiftoffRegister GetUnusedRegister(
      RegClass rc, std::initializer_list<LiftoffRegister> try_first,
      LiftoffRegList pinned = {}) {
    for (LiftoffRegister reg : try_first) {
      DCHECK_EQ(reg.reg_class(), rc);
      if (cache_state_.is_free(reg)) return reg;
    }
    return GetUnusedRegister(rc, pinned);
  }

  // Get an unused register for class {rc}, potentially spilling to free one.
  LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
    if (kNeedI64RegPair && rc == kGpRegPair) {
      LiftoffRegList candidates = kGpCacheRegList;
      Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
      Register high = GetUnusedRegister(candidates, pinned).gp();
      return LiftoffRegister::ForPair(low, high);
    }
    DCHECK(rc == kGpReg || rc == kFpReg);
    LiftoffRegList candidates = GetCacheRegList(rc);
    return GetUnusedRegister(candidates, pinned);
  }

  // Get an unused register of {candidates}, potentially spilling to free one.
  LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
                                    LiftoffRegList pinned = {}) {
    if (cache_state_.has_unused_register(candidates, pinned)) {
      return cache_state_.unused_register(candidates, pinned);
    }
    return SpillOneRegister(candidates, pinned);
  }
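  // Typical calling pattern (illustrative): pin each register as soon as it is
  // allocated so that subsequent allocations cannot hand it out again:
  //   LiftoffRegList pinned;
  //   LiftoffRegister tmp = pinned.set(GetUnusedRegister(kGpReg, pinned));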

  void MergeFullStackWith(CacheState&);
  void MergeStackWith(CacheState&, uint32_t arity);

  void Spill(uint32_t index);
  void SpillLocals();
  void SpillAllRegisters();

  // Call this method whenever spilling something, such that the number of used
  // spill slots can be tracked and the stack frame can be allocated big enough.
  void RecordUsedSpillSlot(uint32_t index) {
    if (index >= num_used_spill_slots_) num_used_spill_slots_ = index + 1;
  }
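  // E.g. recording a spill to slot index 5 ensures that the frame reserves at
  // least six spill slots.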

  // Load parameters into the right registers / stack slots for the call.
  // Move {*target} into another register if needed and update {*target} to that
  // register, or {no_reg} if target was spilled to the stack.
  void PrepareCall(FunctionSig*, compiler::CallDescriptor*,
                   Register* target = nullptr,
                   LiftoffRegister* target_instance = nullptr);
  // Process return values of the call.
  void FinishCall(FunctionSig*, compiler::CallDescriptor*);

  // Move {src} into {dst}. {src} and {dst} must be different.
  void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);

  // Parallel register move: For a list of tuples <dst, src, type>, move the
  // {src} register of type {type} into {dst}. If {src} equals {dst}, ignore
  // that tuple.
  struct ParallelRegisterMoveTuple {
    LiftoffRegister dst;
    LiftoffRegister src;
    ValueType type;
  };
  void ParallelRegisterMove(std::initializer_list<ParallelRegisterMoveTuple>);
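  // Illustrative use (hypothetical registers {a}, {b}, {c}):
  //   ParallelRegisterMove({{a, b, kWasmI32}, {b, c, kWasmI32}});
  // conceptually reads all sources before writing any destination, so {a}
  // receives the old value of {b} while {b} receives the old value of {c}.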

  // Validate that the register use counts reflect the state of the cache.
  bool ValidateCacheState() const;

  ////////////////////////////////////
  // Platform-specific part.        //
  ////////////////////////////////////

  // This function emits machine code to prepare the stack frame, before the
  // size of the stack frame is known. It returns an offset in the machine code
  // which can later be patched (via {PatchPrepareStackFrame}) when the size of
  // the frame is known.
  inline int PrepareStackFrame();
  inline void PatchPrepareStackFrame(int offset, uint32_t stack_slots);
  inline void FinishCode();
  inline void AbortCompilation();

  inline void LoadConstant(LiftoffRegister, WasmValue,
                           RelocInfo::Mode rmode = RelocInfo::NONE);
  inline void LoadFromInstance(Register dst, uint32_t offset, int size);
  inline void SpillInstance(Register instance);
  inline void FillInstanceInto(Register dst);
  inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
                   uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
                   uint32_t* protected_load_pc = nullptr,
                   bool is_load_mem = false);
  inline void Store(Register dst_addr, Register offset_reg, uint32_t offset_imm,
                    LiftoffRegister src, StoreType type, LiftoffRegList pinned,
                    uint32_t* protected_store_pc = nullptr,
                    bool is_store_mem = false);
  inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
                                  ValueType);
  inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType);

  inline void Move(Register dst, Register src, ValueType);
  inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);

  inline void Spill(uint32_t index, LiftoffRegister, ValueType);
  inline void Spill(uint32_t index, WasmValue);
  inline void Fill(LiftoffRegister, uint32_t index, ValueType);
  // Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
  // 4 bytes on the stack holding half of a 64-bit value. The two half_indexes
  // corresponding to slot {index} are {2*index} and {2*index-1}.
  inline void FillI64Half(Register, uint32_t half_index);
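  // E.g. for slot {index} == 3, the two half_indexes are 6 and 5, per the
  // formula above.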

  // i32 binops.
  inline void emit_i32_add(Register dst, Register lhs, Register rhs);
  inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
  inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
  inline void emit_i32_divs(Register dst, Register lhs, Register rhs,
                            Label* trap_div_by_zero,
                            Label* trap_div_unrepresentable);
  inline void emit_i32_divu(Register dst, Register lhs, Register rhs,
                            Label* trap_div_by_zero);
  inline void emit_i32_rems(Register dst, Register lhs, Register rhs,
                            Label* trap_rem_by_zero);
  inline void emit_i32_remu(Register dst, Register lhs, Register rhs,
                            Label* trap_rem_by_zero);
  inline void emit_i32_and(Register dst, Register lhs, Register rhs);
  inline void emit_i32_or(Register dst, Register lhs, Register rhs);
  inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
  inline void emit_i32_shl(Register dst, Register src, Register amount,
                           LiftoffRegList pinned = {});
  inline void emit_i32_sar(Register dst, Register src, Register amount,
                           LiftoffRegList pinned = {});
  inline void emit_i32_shr(Register dst, Register src, Register amount,
                           LiftoffRegList pinned = {});

  // i32 unops.
  inline bool emit_i32_clz(Register dst, Register src);
  inline bool emit_i32_ctz(Register dst, Register src);
  inline bool emit_i32_popcnt(Register dst, Register src);

  // i64 binops.
  inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline bool emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_div_by_zero,
                            Label* trap_div_unrepresentable);
  inline bool emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_div_by_zero);
  inline bool emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_rem_by_zero);
  inline bool emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_rem_by_zero);
  inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
                          LiftoffRegister rhs);
  inline void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
                           Register amount, LiftoffRegList pinned = {});
  inline void emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
                           Register amount, LiftoffRegList pinned = {});
  inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
                           Register amount, LiftoffRegList pinned = {});

  inline void emit_i32_to_intptr(Register dst, Register src);

  inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs) {
    if (kPointerSize == 8) {
      emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs),
                   LiftoffRegister(rhs));
    } else {
      emit_i32_add(dst, lhs, rhs);
    }
  }
  inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) {
    if (kPointerSize == 8) {
      emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs),
                   LiftoffRegister(rhs));
    } else {
      emit_i32_sub(dst, lhs, rhs);
    }
  }

  // f32 binops.
  inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_div(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);

  // f32 unops.
  inline void emit_f32_abs(DoubleRegister dst, DoubleRegister src);
  inline void emit_f32_neg(DoubleRegister dst, DoubleRegister src);
  inline void emit_f32_ceil(DoubleRegister dst, DoubleRegister src);
  inline void emit_f32_floor(DoubleRegister dst, DoubleRegister src);
  inline void emit_f32_trunc(DoubleRegister dst, DoubleRegister src);
  inline void emit_f32_nearest_int(DoubleRegister dst, DoubleRegister src);
  inline void emit_f32_sqrt(DoubleRegister dst, DoubleRegister src);

  // f64 binops.
  inline void emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_div(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);

  // f64 unops.
  inline void emit_f64_abs(DoubleRegister dst, DoubleRegister src);
  inline void emit_f64_neg(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_ceil(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_floor(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_trunc(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_nearest_int(DoubleRegister dst, DoubleRegister src);
  inline void emit_f64_sqrt(DoubleRegister dst, DoubleRegister src);

  // type conversions.
  inline bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst,
                                   LiftoffRegister src, Label* trap = nullptr);

  inline void emit_jump(Label*);
  inline void emit_jump(Register);

  inline void emit_cond_jump(Condition, Label*, ValueType value, Register lhs,
                             Register rhs = no_reg);
  // Set {dst} to 1 if condition holds, 0 otherwise.
  inline void emit_i32_eqz(Register dst, Register src);
  inline void emit_i32_set_cond(Condition, Register dst, Register lhs,
                                Register rhs);
  inline void emit_i64_eqz(Register dst, LiftoffRegister src);
  inline void emit_i64_set_cond(Condition condition, Register dst,
                                LiftoffRegister lhs, LiftoffRegister rhs);
  inline void emit_f32_set_cond(Condition condition, Register dst,
                                DoubleRegister lhs, DoubleRegister rhs);
  inline void emit_f64_set_cond(Condition condition, Register dst,
                                DoubleRegister lhs, DoubleRegister rhs);

  inline void StackCheck(Label* ool_code, Register limit_address);

  inline void CallTrapCallbackForTesting();

  inline void AssertUnreachable(AbortReason reason);

  inline void PushRegisters(LiftoffRegList);
  inline void PopRegisters(LiftoffRegList);

  inline void DropStackSlotsAndRet(uint32_t num_stack_slots);

  // Execute a C call. Arguments are pushed to the stack and a pointer to this
  // region is passed to the C function. If {out_argument_type != kWasmStmt},
  // the return value of the C function is stored in {rets[0]}. Further
  // outputs (specified in {sig->returns()}) are read from the buffer and stored
  // in the remaining {rets} registers.
  inline void CallC(FunctionSig* sig, const LiftoffRegister* args,
                    const LiftoffRegister* rets, ValueType out_argument_type,
                    int stack_bytes, ExternalReference ext_ref);
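  // Illustrative call (hypothetical {ext_ref} implementing a unary f64
  // operation): the single f64 argument occupies 8 stack bytes, so a caller
  // might pass {stack_bytes} == 8 and {out_argument_type} == kWasmF64 to
  // receive the result in {rets[0]}.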

  inline void CallNativeWasmCode(Address addr);
  // Indirect call: If {target == no_reg}, then pop the target from the stack.
  inline void CallIndirect(FunctionSig* sig,
                           compiler::CallDescriptor* call_descriptor,
                           Register target);
  inline void CallRuntimeStub(WasmCode::RuntimeStubId sid);

  // Reserve space in the current frame and store the address of that space in
  // {addr}.
  inline void AllocateStackSlot(Register addr, uint32_t size);
  inline void DeallocateStackSlot(uint32_t size);

  ////////////////////////////////////
  // End of platform-specific part. //
  ////////////////////////////////////

  uint32_t num_locals() const { return num_locals_; }
  void set_num_locals(uint32_t num_locals);

  uint32_t GetTotalFrameSlotCount() const {
    return num_locals_ + num_used_spill_slots_;
  }

  ValueType local_type(uint32_t index) {
    DCHECK_GT(num_locals_, index);
    ValueType* locals =
        num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
    return locals[index];
  }

  void set_local_type(uint32_t index, ValueType type) {
    ValueType* locals =
        num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
    locals[index] = type;
  }

  CacheState* cache_state() { return &cache_state_; }
  const CacheState* cache_state() const { return &cache_state_; }

  bool did_bailout() { return bailout_reason_ != nullptr; }
  const char* bailout_reason() const { return bailout_reason_; }

  void bailout(const char* reason) {
    if (bailout_reason_ != nullptr) return;
    AbortCompilation();
    bailout_reason_ = reason;
  }

 private:
  uint32_t num_locals_ = 0;
  static constexpr uint32_t kInlineLocalTypes = 8;
  union {
    ValueType local_types_[kInlineLocalTypes];
    ValueType* more_local_types_;
  };
  static_assert(sizeof(ValueType) == 1,
                "Reconsider this inlining if ValueType gets bigger");
  CacheState cache_state_;
  uint32_t num_used_spill_slots_ = 0;
  const char* bailout_reason_ = nullptr;

  LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
                                   LiftoffRegList pinned);
};

std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);

// =======================================================================
// Partially platform-independent implementations of the platform-dependent
// part.

#ifdef V8_TARGET_ARCH_32_BIT

namespace liftoff {
template <void (LiftoffAssembler::*op)(Register, Register, Register)>
void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
                                     LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  // If {dst.low_gp()} does not overlap with {lhs.high_gp()} or {rhs.high_gp()},
  // just first compute the lower half, then the upper half.
  if (dst.low() != lhs.high() && dst.low() != rhs.high()) {
    (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
    (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
    return;
  }
  // If {dst.high_gp()} does not overlap with {lhs.low_gp()} or {rhs.low_gp()},
  // we can compute this the other way around.
  if (dst.high() != lhs.low() && dst.high() != rhs.low()) {
    (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
    (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
    return;
  }
  // Otherwise, we need a temporary register.
  Register tmp =
      assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
  (assm->*op)(tmp, lhs.low_gp(), rhs.low_gp());
  (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
  assm->Move(dst.low_gp(), tmp, kWasmI32);
}
}  // namespace liftoff

void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_and>(
      this, dst, lhs, rhs);
}

void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
                                   LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_or>(
      this, dst, lhs, rhs);
}

void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_xor>(
      this, dst, lhs, rhs);
}

#endif  // V8_TARGET_ARCH_32_BIT

// End of the partially platform-independent implementations of the
// platform-dependent part.
// =======================================================================

class LiftoffStackSlots {
 public:
  explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}

  void Add(const LiftoffAssembler::VarState& src, uint32_t src_index,
           RegPairHalf half) {
    slots_.emplace_back(src, src_index, half);
  }
  void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); }

  inline void Construct();

 private:
  struct Slot {
    // Allow move construction.
    Slot(Slot&&) = default;
    Slot(const LiftoffAssembler::VarState& src, uint32_t src_index,
         RegPairHalf half)
        : src_(src), src_index_(src_index), half_(half) {}
    explicit Slot(const LiftoffAssembler::VarState& src)
        : src_(src), half_(kLowWord) {}

    const LiftoffAssembler::VarState src_;
    uint32_t src_index_ = 0;
    RegPairHalf half_;
  };

  std::vector<Slot> slots_;
  LiftoffAssembler* const asm_;
};

}  // namespace wasm
}  // namespace internal
}  // namespace v8

// Include platform specific implementation.
#if V8_TARGET_ARCH_IA32
#include "src/wasm/baseline/ia32/liftoff-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "src/wasm/baseline/x64/liftoff-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/wasm/baseline/arm64/liftoff-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/wasm/baseline/arm/liftoff-assembler-arm.h"
#elif V8_TARGET_ARCH_PPC
#include "src/wasm/baseline/ppc/liftoff-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/wasm/baseline/s390/liftoff-assembler-s390.h"
#else
#error Unsupported architecture.
#endif

#endif  // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_