// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/baseline/liftoff-assembler.h"

#include <sstream>

#include "src/assembler-inl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/macro-assembler-inl.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/wasm-opcodes.h"

namespace v8 {
namespace internal {
namespace wasm {

using VarState = LiftoffAssembler::VarState;

namespace {

#define __ asm_->

#define TRACE(...)                                            \
  do {                                                        \
    if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
  } while (false)

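// A {StackTransferRecipe} records register moves, constant loads, and stack
// slot fills, and emits them in one batch. Deferring the moves allows
// overlapping source and destination registers (including cycles) to be
// resolved safely. Pending operations are executed explicitly via {Execute},
// and at the latest when the recipe is destroyed. Illustrative usage (see
// e.g. {MergeFullStackWith} below):
//   StackTransferRecipe transfers(this);
//   transfers.TransferStackSlot(target, dst_index, src_index);
//   // Remaining moves and loads execute in ~StackTransferRecipe().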
class StackTransferRecipe {
  struct RegisterMove {
    LiftoffRegister dst;
    LiftoffRegister src;
    ValueType type;
    constexpr RegisterMove(LiftoffRegister dst, LiftoffRegister src,
                           ValueType type)
        : dst(dst), src(src), type(type) {}
  };
  struct RegisterLoad {
    enum LoadKind : uint8_t {
      kConstant,  // load a constant value into a register.
      kStack,     // fill a register from a stack slot.
      kHalfStack  // fill one half of a register pair from half a stack slot.
    };

    LiftoffRegister dst;
    LoadKind kind;
    ValueType type;
    int32_t value;  // i32 constant value or stack index, depending on kind.

    // Named constructors.
    static RegisterLoad Const(LiftoffRegister dst, WasmValue constant) {
      if (constant.type() == kWasmI32) {
        return {dst, kConstant, kWasmI32, constant.to_i32()};
      }
      DCHECK_EQ(kWasmI64, constant.type());
      DCHECK_EQ(constant.to_i32_unchecked(), constant.to_i64_unchecked());
      return {dst, kConstant, kWasmI64, constant.to_i32_unchecked()};
    }
    static RegisterLoad Stack(LiftoffRegister dst, int32_t stack_index,
                              ValueType type) {
      return {dst, kStack, type, stack_index};
    }
    static RegisterLoad HalfStack(LiftoffRegister dst,
                                  int32_t half_stack_index) {
      return {dst, kHalfStack, kWasmI32, half_stack_index};
    }

   private:
    RegisterLoad(LiftoffRegister dst, LoadKind kind, ValueType type,
                 int32_t value)
        : dst(dst), kind(kind), type(type), value(value) {}
  };

 public:
  explicit StackTransferRecipe(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
  ~StackTransferRecipe() { Execute(); }

  void Execute() {
    // First, execute register moves. Then load constants and stack values into
    // registers.

    if ((move_dst_regs_ & move_src_regs_).is_empty()) {
      // No overlap in src and dst registers. Just execute the moves in any
      // order.
      for (RegisterMove& rm : register_moves_) {
        asm_->Move(rm.dst, rm.src, rm.type);
      }
      register_moves_.clear();
    } else {
      // Keep use counters of src registers.
      uint32_t src_reg_use_count[kAfterMaxLiftoffRegCode] = {0};
      for (RegisterMove& rm : register_moves_) {
        ++src_reg_use_count[rm.src.liftoff_code()];
      }
      // Now repeatedly iterate the list of register moves, and execute those
      // whose dst register does not appear as src anymore. The remaining
      // moves are compacted during this iteration.
      // If no more moves can be executed (because of a cycle), spill one
      // register to the stack, add a RegisterLoad to reload it later, and
      // continue.
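      // Example: the moves {r0 <- r1, r1 <- r0} form a cycle; neither can be
      // executed without clobbering the other's source. Spilling one source
      // register to the stack breaks the cycle: the other move becomes
      // executable, and the spilled value is reloaded into its destination
      // via the recorded RegisterLoad once all moves are done.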
      uint32_t next_spill_slot = asm_->cache_state()->stack_height();
      while (!register_moves_.empty()) {
        int executed_moves = 0;
        for (auto& rm : register_moves_) {
          if (src_reg_use_count[rm.dst.liftoff_code()] == 0) {
            asm_->Move(rm.dst, rm.src, rm.type);
            ++executed_moves;
            DCHECK_LT(0, src_reg_use_count[rm.src.liftoff_code()]);
            --src_reg_use_count[rm.src.liftoff_code()];
          } else if (executed_moves) {
            // Compaction: Move not-executed moves to the beginning of the list.
            (&rm)[-executed_moves] = rm;
          }
        }
        if (executed_moves == 0) {
          // There is a cycle. Spill one register, then continue.
          // TODO(clemensh): Use an unused register if available.
          RegisterMove& rm = register_moves_.back();
          LiftoffRegister spill_reg = rm.src;
          asm_->Spill(next_spill_slot, spill_reg, rm.type);
          // Remember to reload into the destination register later.
          LoadStackSlot(register_moves_.back().dst, next_spill_slot, rm.type);
          DCHECK_EQ(1, src_reg_use_count[spill_reg.liftoff_code()]);
          src_reg_use_count[spill_reg.liftoff_code()] = 0;
          ++next_spill_slot;
          executed_moves = 1;
        }
        register_moves_.erase(register_moves_.end() - executed_moves,
                              register_moves_.end());
      }
    }

    for (RegisterLoad& rl : register_loads_) {
      switch (rl.kind) {
        case RegisterLoad::kConstant:
          asm_->LoadConstant(rl.dst, rl.type == kWasmI64
                                         ? WasmValue(int64_t{rl.value})
                                         : WasmValue(int32_t{rl.value}));
          break;
        case RegisterLoad::kStack:
          asm_->Fill(rl.dst, rl.value, rl.type);
          break;
        case RegisterLoad::kHalfStack:
          // As half of a register pair, {rl.dst} must be a gp register.
          asm_->FillI64Half(rl.dst.gp(), rl.value);
          break;
      }
    }
    register_loads_.clear();
  }

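  // Transfer the stack slot at {src_index} of the current cache state into
  // the slot at {dst_index} of {dst_state}, recording the register move,
  // spill, or constant load this requires.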
  void TransferStackSlot(const LiftoffAssembler::CacheState& dst_state,
                         uint32_t dst_index, uint32_t src_index) {
    const VarState& dst = dst_state.stack_state[dst_index];
    const VarState& src = __ cache_state()->stack_state[src_index];
    DCHECK_EQ(dst.type(), src.type());
    switch (dst.loc()) {
      case VarState::kStack:
        switch (src.loc()) {
          case VarState::kStack:
            if (src_index == dst_index) break;
            asm_->MoveStackValue(dst_index, src_index, src.type());
            break;
          case VarState::kRegister:
            asm_->Spill(dst_index, src.reg(), src.type());
            break;
          case VarState::KIntConst:
            asm_->Spill(dst_index, src.constant());
            break;
        }
        break;
      case VarState::kRegister:
        LoadIntoRegister(dst.reg(), src, src_index);
        break;
      case VarState::KIntConst:
        DCHECK_EQ(dst, src);
        break;
    }
  }

  void LoadIntoRegister(LiftoffRegister dst,
                        const LiftoffAssembler::VarState& src,
                        uint32_t src_index) {
    switch (src.loc()) {
      case VarState::kStack:
        LoadStackSlot(dst, src_index, src.type());
        break;
      case VarState::kRegister:
        DCHECK_EQ(dst.reg_class(), src.reg_class());
        if (dst != src.reg()) MoveRegister(dst, src.reg(), src.type());
        break;
      case VarState::KIntConst:
        LoadConstant(dst, src.constant());
        break;
    }
  }

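  // Load one 32-bit half of the i64 value in {src} into {dst}. This is only
  // needed on platforms where i64 values are held in pairs of gp registers
  // ({kNeedI64RegPair}).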
  void LoadI64HalfIntoRegister(LiftoffRegister dst,
                               const LiftoffAssembler::VarState& src,
                               uint32_t index, RegPairHalf half) {
    // Use CHECK such that the remaining code is statically dead if
    // {kNeedI64RegPair} is false.
    CHECK(kNeedI64RegPair);
    DCHECK_EQ(kWasmI64, src.type());
    switch (src.loc()) {
      case VarState::kStack:
        LoadI64HalfStackSlot(dst, 2 * index - (half == kLowWord ? 0 : 1));
        break;
      case VarState::kRegister: {
        LiftoffRegister src_half =
            half == kLowWord ? src.reg().low() : src.reg().high();
        if (dst != src_half) MoveRegister(dst, src_half, kWasmI32);
        break;
      }
      case VarState::KIntConst:
        int32_t value = src.i32_const();
        // The high word is the sign extension of the low word.
        if (half == kHighWord) value = value >> 31;
        LoadConstant(dst, WasmValue(value));
        break;
    }
  }

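  // Record a move of {src} into {dst}. Pair moves are split into two gp
  // register moves. Each register may be the destination of at most one
  // pending move, which {Execute} relies on when resolving cycles.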
  void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueType type) {
    DCHECK_NE(dst, src);
    DCHECK_EQ(dst.reg_class(), src.reg_class());
    DCHECK_EQ(reg_class_for(type), src.reg_class());
    if (src.is_pair()) {
      DCHECK_EQ(kWasmI64, type);
      if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kWasmI32);
      if (dst.high() != src.high())
        MoveRegister(dst.high(), src.high(), kWasmI32);
      return;
    }
    DCHECK(!move_dst_regs_.has(dst));
    move_dst_regs_.set(dst);
    move_src_regs_.set(src);
    register_moves_.emplace_back(dst, src, type);
  }

  void LoadConstant(LiftoffRegister dst, WasmValue value) {
    register_loads_.push_back(RegisterLoad::Const(dst, value));
  }

  void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index,
                     ValueType type) {
    register_loads_.push_back(RegisterLoad::Stack(dst, stack_index, type));
  }

  void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t half_stack_index) {
    register_loads_.push_back(RegisterLoad::HalfStack(dst, half_stack_index));
  }

 private:
  // TODO(clemensh): Avoid unconditionally allocating on the heap.
  std::vector<RegisterMove> register_moves_;
  std::vector<RegisterLoad> register_loads_;
  LiftoffRegList move_dst_regs_;
  LiftoffRegList move_src_regs_;
  LiftoffAssembler* const asm_;
};

}  // namespace

// TODO(clemensh): Don't copy the full parent state (this makes us N^2).
void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
                                             uint32_t num_locals,
                                             uint32_t arity) {
  DCHECK(stack_state.empty());
  DCHECK_GE(source.stack_height(), stack_base);
  stack_state.resize(stack_base + arity, VarState(kWasmStmt));

  // |------locals------|--(in between)--|--(discarded)--|----merge----|
  //  <-- num_locals -->                 ^stack_base      <-- arity -->

  // First, initialize merge slots and locals. Keep them in the registers which
  // are being used in {source}, but avoid using a register multiple times. Use
  // unused registers where necessary and possible.
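  // Iteration 0 of this loop initializes the merge region at the top of the
  // stack; iteration 1 initializes the locals. The section in between is
  // handled separately below.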
  for (int range = 0; range < 2; ++range) {
    auto src_idx = range ? 0 : source.stack_state.size() - arity;
    auto src_end = range ? num_locals : source.stack_state.size();
    auto dst_idx = range ? 0 : stack_state.size() - arity;
    for (; src_idx < src_end; ++src_idx, ++dst_idx) {
      auto& dst = stack_state[dst_idx];
      auto& src = source.stack_state[src_idx];
      // Just initialize to any register; will be overwritten before use.
      LiftoffRegister reg(Register::from_code<0>());
      RegClass rc = src.is_reg() ? src.reg_class() : reg_class_for(src.type());
      if (src.is_reg() && is_free(src.reg())) {
        reg = src.reg();
      } else if (has_unused_register(rc)) {
        reg = unused_register(rc);
      } else {
        // Make this a stack slot.
        dst = VarState(src.type());
        continue;
      }
      dst = VarState(src.type(), reg);
      inc_used(reg);
    }
  }
  // Last, initialize the section in between. Here, constants are allowed, but
  // registers which are already used for the merge region or locals must be
  // spilled.
  for (uint32_t i = num_locals; i < stack_base; ++i) {
    auto& dst = stack_state[i];
    auto& src = source.stack_state[i];
    if (src.is_reg()) {
      if (is_used(src.reg())) {
        // Make this a stack slot.
        dst = VarState(src.type());
      } else {
        dst = VarState(src.type(), src.reg());
        inc_used(src.reg());
      }
    } else if (src.is_const()) {
      dst = src;
    } else {
      DCHECK(src.is_stack());
      // Make this a stack slot.
      dst = VarState(src.type());
    }
  }
  last_spilled_regs = source.last_spilled_regs;
}

void LiftoffAssembler::CacheState::Steal(CacheState& source) {
  // Just use the move assignment operator.
  *this = std::move(source);
}

void LiftoffAssembler::CacheState::Split(const CacheState& source) {
  // Call the private copy assignment operator.
  *this = source;
}

namespace {

constexpr AssemblerOptions DefaultLiftoffOptions() {
  return AssemblerOptions{};
}

}  // namespace

// TODO(clemensh): Provide a reasonably sized buffer, based on wasm function
// size.
LiftoffAssembler::LiftoffAssembler()
    : TurboAssembler(nullptr, DefaultLiftoffOptions(), nullptr, 0,
                     CodeObjectRequired::kNo) {
  set_abort_hard(true);  // Avoid calls to Abort.
}

LiftoffAssembler::~LiftoffAssembler() {
  if (num_locals_ > kInlineLocalTypes) {
    free(more_local_types_);
  }
}

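// Pop the topmost slot of the wasm value stack into a register: reuse the
// cached register if the value already lives in one, otherwise fill it from
// the stack or materialize the constant. Registers in {pinned} are not
// allocated.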
LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
  DCHECK(!cache_state_.stack_state.empty());
  VarState slot = cache_state_.stack_state.back();
  cache_state_.stack_state.pop_back();
  switch (slot.loc()) {
    case VarState::kStack: {
      LiftoffRegister reg =
          GetUnusedRegister(reg_class_for(slot.type()), pinned);
      Fill(reg, cache_state_.stack_height(), slot.type());
      return reg;
    }
    case VarState::kRegister:
      cache_state_.dec_used(slot.reg());
      return slot.reg();
    case VarState::KIntConst: {
      RegClass rc =
          kNeedI64RegPair && slot.type() == kWasmI64 ? kGpRegPair : kGpReg;
      LiftoffRegister reg = GetUnusedRegister(rc, pinned);
      LoadConstant(reg, slot.constant());
      return reg;
    }
  }
  UNREACHABLE();
}

void LiftoffAssembler::MergeFullStackWith(CacheState& target) {
  DCHECK_EQ(cache_state_.stack_height(), target.stack_height());
  // TODO(clemensh): Reuse the same StackTransferRecipe object to save some
  // allocations.
  StackTransferRecipe transfers(this);
  for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
    transfers.TransferStackSlot(target, i, i);
  }
}

void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
  // Before: ----------------|------ pop_count -----|--- arity ---|
  //                         ^target_stack_height   ^stack_base   ^stack_height
  // After:  ----|-- arity --|
  //             ^           ^target_stack_height
  //             ^target_stack_base
  uint32_t stack_height = cache_state_.stack_height();
  uint32_t target_stack_height = target.stack_height();
  DCHECK_LE(target_stack_height, stack_height);
  DCHECK_LE(arity, target_stack_height);
  uint32_t stack_base = stack_height - arity;
  uint32_t target_stack_base = target_stack_height - arity;
  StackTransferRecipe transfers(this);
  for (uint32_t i = 0; i < target_stack_base; ++i) {
    transfers.TransferStackSlot(target, i, i);
  }
  for (uint32_t i = 0; i < arity; ++i) {
    transfers.TransferStackSlot(target, target_stack_base + i, stack_base + i);
  }
}

void LiftoffAssembler::Spill(uint32_t index) {
  auto& slot = cache_state_.stack_state[index];
  switch (slot.loc()) {
    case VarState::kStack:
      return;
    case VarState::kRegister:
      Spill(index, slot.reg(), slot.type());
      cache_state_.dec_used(slot.reg());
      break;
    case VarState::KIntConst:
      Spill(index, slot.constant());
      break;
  }
  slot.MakeStack();
}

void LiftoffAssembler::SpillLocals() {
  for (uint32_t i = 0; i < num_locals_; ++i) {
    Spill(i);
  }
}

void LiftoffAssembler::SpillAllRegisters() {
  for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
    auto& slot = cache_state_.stack_state[i];
    if (!slot.is_reg()) continue;
    Spill(i, slot.reg(), slot.type());
    slot.MakeStack();
  }
  cache_state_.reset_used_registers();
}

void LiftoffAssembler::PrepareCall(FunctionSig* sig,
                                   compiler::CallDescriptor* call_descriptor,
                                   Register* target,
                                   LiftoffRegister* target_instance) {
  uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
  // Input 0 is the call target.
  constexpr size_t kInputShift = 1;

  // Spill all cache slots which are not being used as parameters.
  // Don't update any register use counters; they will be reset later anyway.
  for (uint32_t idx = 0, end = cache_state_.stack_height() - num_params;
       idx < end; ++idx) {
    VarState& slot = cache_state_.stack_state[idx];
    if (!slot.is_reg()) continue;
    Spill(idx, slot.reg(), slot.type());
    slot.MakeStack();
  }

  LiftoffStackSlots stack_slots(this);
  StackTransferRecipe stack_transfers(this);
  LiftoffRegList param_regs;

  // Move the target instance (if supplied) into the correct instance register.
  compiler::LinkageLocation instance_loc =
      call_descriptor->GetInputLocation(kInputShift);
  DCHECK(instance_loc.IsRegister() && !instance_loc.IsAnyRegister());
  LiftoffRegister instance_reg(Register::from_code(instance_loc.AsRegister()));
  param_regs.set(instance_reg);
  if (target_instance && *target_instance != instance_reg) {
    stack_transfers.MoveRegister(instance_reg, *target_instance, kWasmIntPtr);
  }

  // Now move all parameter values into the right slot for the call.
  // Don't pop values yet, such that the stack height is still correct when
  // executing the {stack_transfers}.
  // Process parameters backwards, such that pushes of caller frame slots are
  // in the correct order.
  uint32_t param_base = cache_state_.stack_height() - num_params;
  uint32_t call_desc_input_idx =
      static_cast<uint32_t>(call_descriptor->InputCount());
  for (uint32_t i = num_params; i > 0; --i) {
    const uint32_t param = i - 1;
    ValueType type = sig->GetParam(param);
    const bool is_pair = kNeedI64RegPair && type == kWasmI64;
    const int num_lowered_params = is_pair ? 2 : 1;
    const uint32_t stack_idx = param_base + param;
    const VarState& slot = cache_state_.stack_state[stack_idx];
    // Process both halves of a register pair separately, because they are
    // passed as separate parameters. One or both of them could end up on the
    // stack.
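    // (Register pairs only occur when {kNeedI64RegPair} is true, i.e. on
    // 32-bit platforms, where an i64 parameter is lowered to two i32
    // parameters in the call descriptor.)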
    for (int lowered_idx = 0; lowered_idx < num_lowered_params; ++lowered_idx) {
      const RegPairHalf half =
          is_pair && lowered_idx == 0 ? kHighWord : kLowWord;
      --call_desc_input_idx;
      compiler::LinkageLocation loc =
          call_descriptor->GetInputLocation(call_desc_input_idx);
      if (loc.IsRegister()) {
        DCHECK(!loc.IsAnyRegister());
        RegClass rc = is_pair ? kGpReg : reg_class_for(type);
        LiftoffRegister reg = LiftoffRegister::from_code(rc, loc.AsRegister());
        param_regs.set(reg);
        if (is_pair) {
          stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_idx, half);
        } else {
          stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
        }
      } else {
        DCHECK(loc.IsCallerFrameSlot());
        stack_slots.Add(slot, stack_idx, half);
      }
    }
  }
  // {call_desc_input_idx} should point after the instance parameter now.
  DCHECK_EQ(call_desc_input_idx, kInputShift + 1);

  // If the target register overlaps with a parameter register, then move the
  // target to another free register, or spill to the stack.
  if (target && param_regs.has(LiftoffRegister(*target))) {
    // Try to find another free register.
    LiftoffRegList free_regs = kGpCacheRegList.MaskOut(param_regs);
    if (!free_regs.is_empty()) {
      LiftoffRegister new_target = free_regs.GetFirstRegSet();
      stack_transfers.MoveRegister(new_target, LiftoffRegister(*target),
                                   kWasmIntPtr);
      *target = new_target.gp();
    } else {
      stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr,
                                                 LiftoffRegister(*target)));
      *target = no_reg;
    }
  }

  // Create all the slots.
  stack_slots.Construct();
  // Execute the stack transfers before filling the instance register.
  stack_transfers.Execute();

  // Pop parameters from the value stack.
  auto stack_end = cache_state_.stack_state.end();
  cache_state_.stack_state.erase(stack_end - num_params, stack_end);

  // Reset register use counters.
  cache_state_.reset_used_registers();

  // Reload the instance from the stack.
  if (!target_instance) {
    FillInstanceInto(instance_reg.gp());
  }
}

void LiftoffAssembler::FinishCall(FunctionSig* sig,
                                  compiler::CallDescriptor* call_descriptor) {
  const size_t return_count = sig->return_count();
  if (return_count != 0) {
    DCHECK_EQ(1, return_count);
    ValueType return_type = sig->GetReturn(0);
    const bool need_pair = kNeedI64RegPair && return_type == kWasmI64;
    DCHECK_EQ(need_pair ? 2 : 1, call_descriptor->ReturnCount());
    RegClass rc = need_pair ? kGpReg : reg_class_for(return_type);
    LiftoffRegister return_reg = LiftoffRegister::from_code(
        rc, call_descriptor->GetReturnLocation(0).AsRegister());
    DCHECK(GetCacheRegList(rc).has(return_reg));
    if (need_pair) {
      LiftoffRegister high_reg = LiftoffRegister::from_code(
          rc, call_descriptor->GetReturnLocation(1).AsRegister());
      DCHECK(GetCacheRegList(rc).has(high_reg));
      return_reg = LiftoffRegister::ForPair(return_reg.gp(), high_reg.gp());
    }
    DCHECK(!cache_state_.is_used(return_reg));
    PushRegister(return_type, return_reg);
  }
}

void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
                            ValueType type) {
  DCHECK_EQ(dst.reg_class(), src.reg_class());
  DCHECK_NE(dst, src);
  if (kNeedI64RegPair && dst.is_pair()) {
    // Use the {StackTransferRecipe} to move pairs, as the registers in the
    // pairs might overlap.
    StackTransferRecipe(this).MoveRegister(dst, src, type);
  } else if (dst.is_gp()) {
    Move(dst.gp(), src.gp(), type);
  } else {
    Move(dst.fp(), src.fp(), type);
  }
}

void LiftoffAssembler::ParallelRegisterMove(
    std::initializer_list<ParallelRegisterMoveTuple> tuples) {
  StackTransferRecipe stack_transfers(this);
  for (auto tuple : tuples) {
    if (tuple.dst == tuple.src) continue;
    stack_transfers.MoveRegister(tuple.dst, tuple.src, tuple.type);
  }
}

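// Recompute the register use counts and the used-register list from the
// stack state and compare them against the cached values; on mismatch, print
// both and abort.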
bool LiftoffAssembler::ValidateCacheState() const {
  uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
  LiftoffRegList used_regs;
  for (const VarState& var : cache_state_.stack_state) {
    if (!var.is_reg()) continue;
    LiftoffRegister reg = var.reg();
    if (kNeedI64RegPair && reg.is_pair()) {
      ++register_use_count[reg.low().liftoff_code()];
      ++register_use_count[reg.high().liftoff_code()];
    } else {
      ++register_use_count[reg.liftoff_code()];
    }
    used_regs.set(reg);
  }
  bool valid = memcmp(register_use_count, cache_state_.register_use_count,
                      sizeof(register_use_count)) == 0 &&
               used_regs == cache_state_.used_registers;
  if (valid) return true;
  std::ostringstream os;
  os << "Error in LiftoffAssembler::ValidateCacheState().\n";
  os << "expected: used_regs " << used_regs << ", counts "
     << PrintCollection(register_use_count) << "\n";
  os << "found:    used_regs " << cache_state_.used_registers << ", counts "
     << PrintCollection(cache_state_.register_use_count) << "\n";
  os << "Use --trace-liftoff to debug.";
  FATAL("%s", os.str().c_str());
}

LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
                                                   LiftoffRegList pinned) {
  // Spill one cached value to free a register.
  LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates, pinned);
  SpillRegister(spill_reg);
  return spill_reg;
}

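// Spill every stack slot that currently holds {reg} (or a register
// overlapping it), walking the value stack from the top until all uses are
// gone.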
void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
  int remaining_uses = cache_state_.get_use_count(reg);
  DCHECK_LT(0, remaining_uses);
  for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) {
    DCHECK_GT(cache_state_.stack_height(), idx);
    auto* slot = &cache_state_.stack_state[idx];
    if (!slot->is_reg() || !slot->reg().overlaps(reg)) continue;
    if (slot->reg().is_pair()) {
      // Make sure to decrement *both* registers in a pair, because the
      // {clear_used} call below only clears one of them.
      cache_state_.dec_used(slot->reg().low());
      cache_state_.dec_used(slot->reg().high());
    }
    Spill(idx, slot->reg(), slot->type());
    slot->MakeStack();
    if (--remaining_uses == 0) break;
  }
  cache_state_.clear_used(reg);
}

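// Local types are stored inline if they fit ({kInlineLocalTypes}); otherwise
// they live in a heap-allocated array freed in {~LiftoffAssembler}.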
void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
  DCHECK_EQ(0, num_locals_);  // only call this once.
  num_locals_ = num_locals;
  if (num_locals > kInlineLocalTypes) {
    more_local_types_ =
        reinterpret_cast<ValueType*>(malloc(num_locals * sizeof(ValueType)));
    DCHECK_NOT_NULL(more_local_types_);
  }
}

std::ostream& operator<<(std::ostream& os, VarState slot) {
  os << ValueTypes::TypeName(slot.type()) << ":";
  switch (slot.loc()) {
    case VarState::kStack:
      return os << "s";
    case VarState::kRegister:
      return os << slot.reg();
    case VarState::KIntConst:
      return os << "c" << slot.i32_const();
  }
  UNREACHABLE();
}

#undef __
#undef TRACE

}  // namespace wasm
}  // namespace internal
}  // namespace v8