1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/wasm/baseline/liftoff-compiler.h"
6 
7 #include "src/assembler-inl.h"
8 #include "src/base/optional.h"
9 // TODO(clemensh): Remove dependences on compiler stuff.
10 #include "src/compiler/linkage.h"
11 #include "src/compiler/wasm-compiler.h"
12 #include "src/counters.h"
13 #include "src/macro-assembler-inl.h"
14 #include "src/tracing/trace-event.h"
15 #include "src/wasm/baseline/liftoff-assembler.h"
16 #include "src/wasm/function-body-decoder-impl.h"
17 #include "src/wasm/function-compiler.h"
18 #include "src/wasm/memory-tracing.h"
19 #include "src/wasm/wasm-engine.h"
20 #include "src/wasm/wasm-linkage.h"
21 #include "src/wasm/wasm-objects.h"
22 #include "src/wasm/wasm-opcodes.h"
23 
24 namespace v8 {
25 namespace internal {
26 namespace wasm {
27 
28 constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
29 constexpr auto KIntConst = LiftoffAssembler::VarState::KIntConst;
30 constexpr auto kStack = LiftoffAssembler::VarState::kStack;
31 
32 namespace {
33 
34 #define __ asm_.
35 
36 #define TRACE(...)                                            \
37   do {                                                        \
38     if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
39   } while (false)
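// For illustration: with the trace_liftoff flag (FLAG_trace_liftoff) enabled,
// a call like TRACE("unsupported: %s\n", reason) (see unsupported() below)
// prints "[liftoff] unsupported: ..."; without the flag the PrintF is skipped.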
40 
41 #define WASM_INSTANCE_OBJECT_OFFSET(name) \
42   (WasmInstanceObject::k##name##Offset - kHeapObjectTag)
43 
44 #define LOAD_INSTANCE_FIELD(dst, name, type)                       \
45   __ LoadFromInstance(dst.gp(), WASM_INSTANCE_OBJECT_OFFSET(name), \
46                       LoadType(type).size());
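// As a rough sketch of what the macro pair above produces,
// LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kPointerLoadType)
// expands to something like
//   __ LoadFromInstance(limit_address.gp(),
//                       WasmInstanceObject::kStackLimitAddressOffset -
//                           kHeapObjectTag,
//                       LoadType(kPointerLoadType).size());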
47 
48 #ifdef DEBUG
49 #define DEBUG_CODE_COMMENT(str) \
50   do {                          \
51     __ RecordComment(str);      \
52   } while (false)
53 #else
54 #define DEBUG_CODE_COMMENT(str) ((void)0)
55 #endif
56 
57 constexpr LoadType::LoadTypeValue kPointerLoadType =
58     kPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
59 
60 #if V8_TARGET_ARCH_ARM64
61 // On ARM64, the Assembler keeps track of pointers to Labels to resolve
62 // branches to distant targets. Moving labels would confuse the Assembler,
63 // thus store the label on the heap and keep a unique_ptr.
64 class MovableLabel {
65  public:
66   MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(MovableLabel);
67   MovableLabel() : label_(new Label()) {}
68 
69   Label* get() { return label_.get(); }
70 
71  private:
72   std::unique_ptr<Label> label_;
73 };
74 #else
75 // On all other platforms, just store the Label directly.
76 class MovableLabel {
77  public:
78   MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(MovableLabel);
79 
80   Label* get() { return &label_; }
81 
82  private:
83   Label label_;
84 };
85 #endif
86 
87 compiler::CallDescriptor* GetLoweredCallDescriptor(
88     Zone* zone, compiler::CallDescriptor* call_desc) {
89   return kPointerSize == 4 ? compiler::GetI32WasmCallDescriptor(zone, call_desc)
90                            : call_desc;
91 }
92 
93 constexpr ValueType kTypesArr_ilfd[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64};
94 constexpr Vector<const ValueType> kTypes_ilfd = ArrayVector(kTypesArr_ilfd);
95 
96 class LiftoffCompiler {
97  public:
98   MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(LiftoffCompiler);
99 
100   // TODO(clemensh): Make this a template parameter.
101   static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
102 
103   using Value = ValueBase;
104 
105   struct ElseState {
106     MovableLabel label;
107     LiftoffAssembler::CacheState state;
108   };
109 
110   struct Control : public ControlWithNamedConstructors<Control, Value> {
111     MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(Control);
112 
113     std::unique_ptr<ElseState> else_state;
114     LiftoffAssembler::CacheState label_state;
115     MovableLabel label;
116   };
117 
118   using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
119 
120   struct OutOfLineCode {
121     MovableLabel label;
122     MovableLabel continuation;
123     WasmCode::RuntimeStubId stub;
124     WasmCodePosition position;
125     LiftoffRegList regs_to_save;
126     uint32_t pc;  // for trap handler.
127 
128     // Named constructors:
129     static OutOfLineCode Trap(WasmCode::RuntimeStubId s, WasmCodePosition pos,
130                               uint32_t pc) {
131       DCHECK_LT(0, pos);
132       return {{}, {}, s, pos, {}, pc};
133     }
134     static OutOfLineCode StackCheck(WasmCodePosition pos, LiftoffRegList regs) {
135       return {{}, {}, WasmCode::kWasmStackGuard, pos, regs, 0};
136     }
137   };
138 
139   LiftoffCompiler(compiler::CallDescriptor* call_descriptor, ModuleEnv* env,
140                   Zone* compilation_zone)
141       : descriptor_(
142             GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
143         env_(env),
144         compilation_zone_(compilation_zone),
145         safepoint_table_builder_(compilation_zone_) {}
146 
147   ~LiftoffCompiler() { BindUnboundLabels(nullptr); }
148 
149   bool ok() const { return ok_; }
150 
151   void GetCode(CodeDesc* desc) { asm_.GetCode(nullptr, desc); }
152 
153   OwnedVector<uint8_t> GetSourcePositionTable() {
154     return source_position_table_builder_.ToSourcePositionTableVector();
155   }
156 
157   OwnedVector<trap_handler::ProtectedInstructionData> GetProtectedInstructions()
158       const {
159     return OwnedVector<trap_handler::ProtectedInstructionData>::Of(
160         protected_instructions_);
161   }
162 
163   uint32_t GetTotalFrameSlotCount() const {
164     return __ GetTotalFrameSlotCount();
165   }
166 
167   void unsupported(FullDecoder* decoder, const char* reason) {
168     ok_ = false;
169     TRACE("unsupported: %s\n", reason);
170     decoder->errorf(decoder->pc(), "unsupported liftoff operation: %s", reason);
171     BindUnboundLabels(decoder);
172   }
173 
174   bool DidAssemblerBailout(FullDecoder* decoder) {
175     if (decoder->failed() || !__ did_bailout()) return false;
176     unsupported(decoder, __ bailout_reason());
177     return true;
178   }
179 
180   bool CheckSupportedType(FullDecoder* decoder,
181                           Vector<const ValueType> supported_types,
182                           ValueType type, const char* context) {
183     char buffer[128];
184     // Check supported types.
185     for (ValueType supported : supported_types) {
186       if (type == supported) return true;
187     }
188     SNPrintF(ArrayVector(buffer), "%s %s", ValueTypes::TypeName(type), context);
189     unsupported(decoder, buffer);
190     return false;
191   }
192 
193   int GetSafepointTableOffset() const {
194     return safepoint_table_builder_.GetCodeOffset();
195   }
196 
197   void BindUnboundLabels(FullDecoder* decoder) {
198 #ifdef DEBUG
199     // Bind all labels now, otherwise their destructor will fire a DCHECK error
200     // if they were referenced before.
201     uint32_t control_depth = decoder ? decoder->control_depth() : 0;
202     for (uint32_t i = 0; i < control_depth; ++i) {
203       Control* c = decoder->control_at(i);
204       Label* label = c->label.get();
205       if (!label->is_bound()) __ bind(label);
206       if (c->else_state) {
207         Label* else_label = c->else_state->label.get();
208         if (!else_label->is_bound()) __ bind(else_label);
209       }
210     }
211     for (auto& ool : out_of_line_code_) {
212       if (!ool.label.get()->is_bound()) __ bind(ool.label.get());
213     }
214 #endif
215   }
216 
217   void StartFunction(FullDecoder* decoder) {
218     int num_locals = decoder->NumLocals();
219     __ set_num_locals(num_locals);
220     for (int i = 0; i < num_locals; ++i) {
221       __ set_local_type(i, decoder->GetLocalType(i));
222     }
223   }
224 
225   void CollectReservedRegsForParameters(uint32_t input_idx_start,
226                                         uint32_t num_params,
227                                         LiftoffRegList& param_regs) {
228     uint32_t input_idx = input_idx_start;
229     for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
230       ValueType type = __ local_type(param_idx);
231       const int num_lowered_params = 1 + needs_reg_pair(type);
232       RegClass rc = num_lowered_params == 1 ? reg_class_for(type) : kGpReg;
233 
234       for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
235         compiler::LinkageLocation param_loc =
236             descriptor_->GetInputLocation(input_idx + pair_idx);
237         if (param_loc.IsRegister()) {
238           DCHECK(!param_loc.IsAnyRegister());
239           int reg_code = param_loc.AsRegister();
240           RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
241                                             : kLiftoffAssemblerFpCacheRegs;
242           if (cache_regs & (1 << reg_code)) {
243             LiftoffRegister in_reg = LiftoffRegister::from_code(rc, reg_code);
244             param_regs.set(in_reg);
245           }
246         }
247       }
248       input_idx += num_lowered_params;
249     }
250   }
251 
252   // Returns the number of inputs processed (1 or 2).
253   uint32_t ProcessParameter(ValueType type, uint32_t input_idx) {
254     const int num_lowered_params = 1 + needs_reg_pair(type);
255     // Initialize to anything, will be set in the loop and used afterwards.
256     LiftoffRegister reg = LiftoffRegister::from_code(kGpReg, 0);
257     RegClass rc = num_lowered_params == 1 ? reg_class_for(type) : kGpReg;
258     LiftoffRegList pinned;
259     for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
260       compiler::LinkageLocation param_loc =
261           descriptor_->GetInputLocation(input_idx + pair_idx);
262       // Initialize to anything, will be set in both arms of the if.
263       LiftoffRegister in_reg = LiftoffRegister::from_code(kGpReg, 0);
264       if (param_loc.IsRegister()) {
265         DCHECK(!param_loc.IsAnyRegister());
266         int reg_code = param_loc.AsRegister();
267         RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
268                                           : kLiftoffAssemblerFpCacheRegs;
269         if (cache_regs & (1 << reg_code)) {
270           // This is a cache register, just use it.
271           in_reg = LiftoffRegister::from_code(rc, reg_code);
272         } else {
273           // Move to a cache register (spill one if necessary).
274           // Note that we cannot create a {LiftoffRegister} for reg_code, since
275           // {LiftoffRegister} can only store cache regs.
276           in_reg = __ GetUnusedRegister(rc, pinned);
277           if (rc == kGpReg) {
278             __ Move(in_reg.gp(), Register::from_code(reg_code), type);
279           } else {
280             __ Move(in_reg.fp(), DoubleRegister::from_code(reg_code), type);
281           }
282         }
283       } else if (param_loc.IsCallerFrameSlot()) {
284         in_reg = __ GetUnusedRegister(rc, pinned);
285         ValueType lowered_type = num_lowered_params == 1 ? type : kWasmI32;
286         __ LoadCallerFrameSlot(in_reg, -param_loc.AsCallerFrameSlot(),
287                                lowered_type);
288       }
289       reg = pair_idx == 0 ? in_reg
290                           : LiftoffRegister::ForPair(reg.gp(), in_reg.gp());
291       pinned.set(reg);
292     }
293     __ PushRegister(type, reg);
294     return num_lowered_params;
295   }
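  // Note: on 32-bit targets an i64 parameter is lowered to two i32 inputs
  // (num_lowered_params == 2); the two gp registers read above are then
  // combined into a single value via LiftoffRegister::ForPair.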
296 
297   void StackCheck(WasmCodePosition position) {
298     if (FLAG_wasm_no_stack_checks || !env_->runtime_exception_support) return;
299     out_of_line_code_.push_back(
300         OutOfLineCode::StackCheck(position, __ cache_state()->used_registers));
301     OutOfLineCode& ool = out_of_line_code_.back();
302     LiftoffRegister limit_address = __ GetUnusedRegister(kGpReg);
303     LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kPointerLoadType);
304     __ StackCheck(ool.label.get(), limit_address.gp());
305     __ bind(ool.continuation.get());
306   }
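  // The sequence above loads the stack limit address from the instance, lets
  // the platform-specific StackCheck compare the stack pointer against the
  // limit, and branches to the out-of-line WasmStackGuard call on overflow;
  // execution then resumes at {ool.continuation}.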
307 
308   void StartFunctionBody(FullDecoder* decoder, Control* block) {
309     for (uint32_t i = 0; i < __ num_locals(); ++i) {
310       if (!CheckSupportedType(decoder, kTypes_ilfd, __ local_type(i), "param"))
311         return;
312     }
313 
314     // Input 0 is the call target, the instance is at 1.
315     constexpr int kInstanceParameterIndex = 1;
316     // Store the instance parameter to a special stack slot.
317     compiler::LinkageLocation instance_loc =
318         descriptor_->GetInputLocation(kInstanceParameterIndex);
319     DCHECK(instance_loc.IsRegister());
320     DCHECK(!instance_loc.IsAnyRegister());
321     Register instance_reg = Register::from_code(instance_loc.AsRegister());
322     DCHECK_EQ(kWasmInstanceRegister, instance_reg);
323 
324     // Parameter 0 is the instance parameter.
325     uint32_t num_params =
326         static_cast<uint32_t>(decoder->sig_->parameter_count());
327 
328     __ EnterFrame(StackFrame::WASM_COMPILED);
329     __ set_has_frame(true);
330     pc_offset_stack_frame_construction_ = __ PrepareStackFrame();
331     // {PrepareStackFrame} is the first platform-specific assembler method.
332     // If this failed, we can bail out immediately, avoiding runtime overhead
333     // and potential failures because of other unimplemented methods.
334     // A platform implementing {PrepareStackFrame} must ensure that we can
335     // finish compilation without errors even if we hit unimplemented
336     // LiftoffAssembler methods.
337     if (DidAssemblerBailout(decoder)) return;
338 
339     __ SpillInstance(instance_reg);
340     // Input 0 is the code target, 1 is the instance. First parameter at 2.
341     uint32_t input_idx = kInstanceParameterIndex + 1;
342     for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
343       input_idx += ProcessParameter(__ local_type(param_idx), input_idx);
344     }
345     DCHECK_EQ(input_idx, descriptor_->InputCount());
346     // Set to a gp register, to mark this uninitialized.
347     LiftoffRegister zero_double_reg(Register::from_code<0>());
348     DCHECK(zero_double_reg.is_gp());
349     for (uint32_t param_idx = num_params; param_idx < __ num_locals();
350          ++param_idx) {
351       ValueType type = decoder->GetLocalType(param_idx);
352       switch (type) {
353         case kWasmI32:
354           __ cache_state()->stack_state.emplace_back(kWasmI32, uint32_t{0});
355           break;
356         case kWasmI64:
357           __ cache_state()->stack_state.emplace_back(kWasmI64, uint32_t{0});
358           break;
359         case kWasmF32:
360         case kWasmF64:
361           if (zero_double_reg.is_gp()) {
362             // Note: This might spill one of the registers used to hold
363             // parameters.
364             zero_double_reg = __ GetUnusedRegister(kFpReg);
365             // Zero is represented by the bit pattern 0 for both f32 and f64.
366             __ LoadConstant(zero_double_reg, WasmValue(0.));
367           }
368           __ PushRegister(type, zero_double_reg);
369           break;
370         default:
371           UNIMPLEMENTED();
372       }
373     }
374     block->label_state.stack_base = __ num_locals();
375 
376     // The function-prologue stack check is associated with position 0, which
377     // is never a position of any instruction in the function.
378     StackCheck(0);
379 
380     DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
381   }
382 
383   void GenerateOutOfLineCode(OutOfLineCode& ool) {
384     __ bind(ool.label.get());
385     const bool is_stack_check = ool.stub == WasmCode::kWasmStackGuard;
386     const bool is_mem_out_of_bounds =
387         ool.stub == WasmCode::kThrowWasmTrapMemOutOfBounds;
388 
389     if (is_mem_out_of_bounds && env_->use_trap_handler) {
390       uint32_t pc = static_cast<uint32_t>(__ pc_offset());
391       DCHECK_EQ(pc, __ pc_offset());
392       protected_instructions_.emplace_back(
393           trap_handler::ProtectedInstructionData{ool.pc, pc});
394     }
395 
396     if (!env_->runtime_exception_support) {
397       // We cannot test calls to the runtime in cctest/test-run-wasm.
398       // Therefore we emit a call to C here instead of a call to the runtime.
399       // In this mode, we never generate stack checks.
400       DCHECK(!is_stack_check);
401       __ CallTrapCallbackForTesting();
402       __ LeaveFrame(StackFrame::WASM_COMPILED);
403       __ DropStackSlotsAndRet(
404           static_cast<uint32_t>(descriptor_->StackParameterCount()));
405       return;
406     }
407 
408     if (!ool.regs_to_save.is_empty()) __ PushRegisters(ool.regs_to_save);
409 
410     source_position_table_builder_.AddPosition(
411         __ pc_offset(), SourcePosition(ool.position), false);
412     __ CallRuntimeStub(ool.stub);
413     safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
414                                              Safepoint::kNoLazyDeopt);
415     DCHECK_EQ(ool.continuation.get()->is_bound(), is_stack_check);
416     if (!ool.regs_to_save.is_empty()) __ PopRegisters(ool.regs_to_save);
417     if (is_stack_check) {
418       __ emit_jump(ool.continuation.get());
419     } else {
420       __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
421     }
422   }
423 
424   void FinishFunction(FullDecoder* decoder) {
425     if (DidAssemblerBailout(decoder)) return;
426     for (OutOfLineCode& ool : out_of_line_code_) {
427       GenerateOutOfLineCode(ool);
428     }
429     __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
430                               __ GetTotalFrameSlotCount());
431     __ FinishCode();
432     safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCount());
433     // The previous calls may have also generated a bailout.
434     DidAssemblerBailout(decoder);
435   }
436 
437   void OnFirstError(FullDecoder* decoder) {
438     ok_ = false;
439     BindUnboundLabels(decoder);
440     asm_.AbortCompilation();
441   }
442 
443   void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
444     TraceCacheState(decoder);
445     SLOW_DCHECK(__ ValidateCacheState());
446     DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
447   }
448 
449   void Block(FullDecoder* decoder, Control* block) {
450     block->label_state.stack_base = __ cache_state()->stack_height();
451   }
452 
453   void Loop(FullDecoder* decoder, Control* loop) {
454     loop->label_state.stack_base = __ cache_state()->stack_height();
455 
456     // Before entering a loop, spill all locals to the stack, in order to free
457     // the cache registers, and to avoid unnecessarily reloading stack values
458     // into registers at branches.
459     // TODO(clemensh): Come up with a better strategy here, involving
460     // pre-analysis of the function.
461     __ SpillLocals();
462 
463     // Loop labels bind at the beginning of the block.
464     __ bind(loop->label.get());
465 
466     // Save the current cache state for the merge when jumping to this loop.
467     loop->label_state.Split(*__ cache_state());
468 
469     // Execute a stack check in the loop header.
470     StackCheck(decoder->position());
471   }
472 
473   void Try(FullDecoder* decoder, Control* block) {
474     unsupported(decoder, "try");
475   }
476 
477   void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
478     DCHECK_EQ(if_block, decoder->control_at(0));
479     DCHECK(if_block->is_if());
480 
481     if (if_block->start_merge.arity > 0 || if_block->end_merge.arity > 1)
482       return unsupported(decoder, "multi-value if");
483 
484     // Allocate the else state.
485     if_block->else_state = base::make_unique<ElseState>();
486 
487     // Test the condition, jump to else if zero.
488     Register value = __ PopToRegister().gp();
489     __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kWasmI32,
490                       value);
491 
492     if_block->label_state.stack_base = __ cache_state()->stack_height();
493     // Store the state (after popping the value) for executing the else branch.
494     if_block->else_state->state.Split(*__ cache_state());
495   }
496 
497   void FallThruTo(FullDecoder* decoder, Control* c) {
498     if (c->end_merge.reached) {
499       __ MergeFullStackWith(c->label_state);
500     } else if (c->is_onearmed_if()) {
501       c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
502                                c->br_merge()->arity);
503       __ MergeFullStackWith(c->label_state);
504     } else {
505       c->label_state.Split(*__ cache_state());
506     }
507     TraceCacheState(decoder);
508   }
509 
510   void PopControl(FullDecoder* decoder, Control* c) {
511     if (!c->is_loop() && c->end_merge.reached) {
512       __ cache_state()->Steal(c->label_state);
513     }
514     if (!c->label.get()->is_bound()) {
515       __ bind(c->label.get());
516     }
517   }
518 
519   void EndControl(FullDecoder* decoder, Control* c) {}
520 
521   enum CCallReturn : bool { kHasReturn = true, kNoReturn = false };
522 
523   void GenerateCCall(const LiftoffRegister* result_regs, FunctionSig* sig,
524                      ValueType out_argument_type,
525                      const LiftoffRegister* arg_regs,
526                      ExternalReference ext_ref) {
527     // Before making a call, spill all cache registers.
528     __ SpillAllRegisters();
529 
530     // Store arguments on our stack, then align the stack for calling to C.
531     int param_bytes = 0;
532     for (ValueType param_type : sig->parameters()) {
533       param_bytes += ValueTypes::MemSize(param_type);
534     }
535     int out_arg_bytes = out_argument_type == kWasmStmt
536                             ? 0
537                             : ValueTypes::MemSize(out_argument_type);
538     int stack_bytes = std::max(param_bytes, out_arg_bytes);
539     __ CallC(sig, arg_regs, result_regs, out_argument_type, stack_bytes,
540              ext_ref);
541   }
542 
543   template <ValueType src_type, ValueType result_type, class EmitFn>
544   void EmitUnOp(EmitFn fn) {
545     static RegClass src_rc = reg_class_for(src_type);
546     static RegClass result_rc = reg_class_for(result_type);
547     LiftoffRegister src = __ PopToRegister();
548     LiftoffRegister dst = src_rc == result_rc
549                               ? __ GetUnusedRegister(result_rc, {src})
550                               : __ GetUnusedRegister(result_rc);
551     fn(dst, src);
552     __ PushRegister(result_type, dst);
553   }
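  // A typical instantiation, as used by the UnOp handler below (sketch):
  //   EmitUnOp<kWasmI32, kWasmI32>(
  //       [=](LiftoffRegister dst, LiftoffRegister src) {
  //         __ emit_i32_clz(dst.gp(), src.gp());
  //       });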
554 
555   void EmitI32UnOpWithCFallback(bool (LiftoffAssembler::*emit_fn)(Register,
556                                                                   Register),
557                                 ExternalReference (*fallback_fn)()) {
558     auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
559       if (emit_fn && (asm_.*emit_fn)(dst.gp(), src.gp())) return;
560       ExternalReference ext_ref = fallback_fn();
561       ValueType sig_i_i_reps[] = {kWasmI32, kWasmI32};
562       FunctionSig sig_i_i(1, 1, sig_i_i_reps);
563       GenerateCCall(&dst, &sig_i_i, kWasmStmt, &src, ext_ref);
564     };
565     EmitUnOp<kWasmI32, kWasmI32>(emit_with_c_fallback);
566   }
567 
568   template <ValueType type>
569   void EmitFloatUnOpWithCFallback(
570       bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister),
571       ExternalReference (*fallback_fn)()) {
572     auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
573       if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
574       ExternalReference ext_ref = fallback_fn();
575       ValueType sig_reps[] = {type};
576       FunctionSig sig(0, 1, sig_reps);
577       GenerateCCall(&dst, &sig, type, &src, ext_ref);
578     };
579     EmitUnOp<type, type>(emit_with_c_fallback);
580   }
581 
582   enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false };
583   template <ValueType dst_type, ValueType src_type,
584             TypeConversionTrapping can_trap>
585   void EmitTypeConversion(WasmOpcode opcode, ExternalReference (*fallback_fn)(),
586                           WasmCodePosition trap_position) {
587     static constexpr RegClass src_rc = reg_class_for(src_type);
588     static constexpr RegClass dst_rc = reg_class_for(dst_type);
589     LiftoffRegister src = __ PopToRegister();
590     LiftoffRegister dst = src_rc == dst_rc ? __ GetUnusedRegister(dst_rc, {src})
591                                            : __ GetUnusedRegister(dst_rc);
592     DCHECK_EQ(!!can_trap, trap_position > 0);
593     Label* trap = can_trap ? AddOutOfLineTrap(
594                                  trap_position,
595                                  WasmCode::kThrowWasmTrapFloatUnrepresentable)
596                            : nullptr;
597     if (!__ emit_type_conversion(opcode, dst, src, trap)) {
598       DCHECK_NOT_NULL(fallback_fn);
599       ExternalReference ext_ref = fallback_fn();
600       if (can_trap) {
601         // External references for potentially trapping conversions return int.
602         ValueType sig_reps[] = {kWasmI32, src_type};
603         FunctionSig sig(1, 1, sig_reps);
604         LiftoffRegister ret_reg =
605             __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
606         LiftoffRegister dst_regs[] = {ret_reg, dst};
607         GenerateCCall(dst_regs, &sig, dst_type, &src, ext_ref);
608         __ emit_cond_jump(kEqual, trap, kWasmI32, ret_reg.gp());
609       } else {
610         ValueType sig_reps[] = {src_type};
611         FunctionSig sig(0, 1, sig_reps);
612         GenerateCCall(&dst, &sig, dst_type, &src, ext_ref);
613       }
614     }
615     __ PushRegister(dst_type, dst);
616   }
617 
618   void UnOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig*,
619             const Value& value, Value* result) {
620 #define CASE_I32_UNOP(opcode, fn)                       \
621   case WasmOpcode::kExpr##opcode:                       \
622     EmitUnOp<kWasmI32, kWasmI32>(                       \
623         [=](LiftoffRegister dst, LiftoffRegister src) { \
624           __ emit_##fn(dst.gp(), src.gp());             \
625         });                                             \
626     break;
627 #define CASE_FLOAT_UNOP(opcode, type, fn)               \
628   case WasmOpcode::kExpr##opcode:                       \
629     EmitUnOp<kWasm##type, kWasm##type>(                 \
630         [=](LiftoffRegister dst, LiftoffRegister src) { \
631           __ emit_##fn(dst.fp(), src.fp());             \
632         });                                             \
633     break;
634 #define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn)                    \
635   case WasmOpcode::kExpr##opcode:                                           \
636     EmitFloatUnOpWithCFallback<kWasm##type>(&LiftoffAssembler::emit_##fn,   \
637                                             &ExternalReference::wasm_##fn); \
638     break;
639 #define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \
640   case WasmOpcode::kExpr##opcode:                                           \
641     EmitTypeConversion<kWasm##dst_type, kWasm##src_type, can_trap>(         \
642         kExpr##opcode, ext_ref, can_trap ? decoder->position() : 0);        \
643     break;
644     switch (opcode) {
645       CASE_I32_UNOP(I32Eqz, i32_eqz)
646       CASE_I32_UNOP(I32Clz, i32_clz)
647       CASE_I32_UNOP(I32Ctz, i32_ctz)
648       CASE_FLOAT_UNOP(F32Abs, F32, f32_abs)
649       CASE_FLOAT_UNOP(F32Neg, F32, f32_neg)
650       CASE_FLOAT_UNOP(F32Ceil, F32, f32_ceil)
651       CASE_FLOAT_UNOP(F32Floor, F32, f32_floor)
652       CASE_FLOAT_UNOP(F32Trunc, F32, f32_trunc)
653       CASE_FLOAT_UNOP(F32NearestInt, F32, f32_nearest_int)
654       CASE_FLOAT_UNOP(F32Sqrt, F32, f32_sqrt)
655       CASE_FLOAT_UNOP(F64Abs, F64, f64_abs)
656       CASE_FLOAT_UNOP(F64Neg, F64, f64_neg)
657       CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Ceil, F64, f64_ceil)
658       CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Floor, F64, f64_floor)
659       CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Trunc, F64, f64_trunc)
660       CASE_FLOAT_UNOP_WITH_CFALLBACK(F64NearestInt, F64, f64_nearest_int)
661       CASE_FLOAT_UNOP(F64Sqrt, F64, f64_sqrt)
662       CASE_TYPE_CONVERSION(I32ConvertI64, I32, I64, nullptr, kNoTrap)
663       CASE_TYPE_CONVERSION(I32SConvertF32, I32, F32, nullptr, kCanTrap)
664       CASE_TYPE_CONVERSION(I32UConvertF32, I32, F32, nullptr, kCanTrap)
665       CASE_TYPE_CONVERSION(I32SConvertF64, I32, F64, nullptr, kCanTrap)
666       CASE_TYPE_CONVERSION(I32UConvertF64, I32, F64, nullptr, kCanTrap)
667       CASE_TYPE_CONVERSION(I32ReinterpretF32, I32, F32, nullptr, kNoTrap)
668       CASE_TYPE_CONVERSION(I64SConvertI32, I64, I32, nullptr, kNoTrap)
669       CASE_TYPE_CONVERSION(I64UConvertI32, I64, I32, nullptr, kNoTrap)
670       CASE_TYPE_CONVERSION(I64SConvertF32, I64, F32,
671                            &ExternalReference::wasm_float32_to_int64, kCanTrap)
672       CASE_TYPE_CONVERSION(I64UConvertF32, I64, F32,
673                            &ExternalReference::wasm_float32_to_uint64, kCanTrap)
674       CASE_TYPE_CONVERSION(I64SConvertF64, I64, F64,
675                            &ExternalReference::wasm_float64_to_int64, kCanTrap)
676       CASE_TYPE_CONVERSION(I64UConvertF64, I64, F64,
677                            &ExternalReference::wasm_float64_to_uint64, kCanTrap)
678       CASE_TYPE_CONVERSION(I64ReinterpretF64, I64, F64, nullptr, kNoTrap)
679       CASE_TYPE_CONVERSION(F32SConvertI32, F32, I32, nullptr, kNoTrap)
680       CASE_TYPE_CONVERSION(F32UConvertI32, F32, I32, nullptr, kNoTrap)
681       CASE_TYPE_CONVERSION(F32SConvertI64, F32, I64,
682                            &ExternalReference::wasm_int64_to_float32, kNoTrap)
683       CASE_TYPE_CONVERSION(F32UConvertI64, F32, I64,
684                            &ExternalReference::wasm_uint64_to_float32, kNoTrap)
685       CASE_TYPE_CONVERSION(F32ConvertF64, F32, F64, nullptr, kNoTrap)
686       CASE_TYPE_CONVERSION(F32ReinterpretI32, F32, I32, nullptr, kNoTrap)
687       CASE_TYPE_CONVERSION(F64SConvertI32, F64, I32, nullptr, kNoTrap)
688       CASE_TYPE_CONVERSION(F64UConvertI32, F64, I32, nullptr, kNoTrap)
689       CASE_TYPE_CONVERSION(F64SConvertI64, F64, I64,
690                            &ExternalReference::wasm_int64_to_float64, kNoTrap)
691       CASE_TYPE_CONVERSION(F64UConvertI64, F64, I64,
692                            &ExternalReference::wasm_uint64_to_float64, kNoTrap)
693       CASE_TYPE_CONVERSION(F64ConvertF32, F64, F32, nullptr, kNoTrap)
694       CASE_TYPE_CONVERSION(F64ReinterpretI64, F64, I64, nullptr, kNoTrap)
695       case kExprI32Popcnt:
696         EmitI32UnOpWithCFallback(&LiftoffAssembler::emit_i32_popcnt,
697                                  &ExternalReference::wasm_word32_popcnt);
698         break;
699       case WasmOpcode::kExprI64Eqz:
700         EmitUnOp<kWasmI64, kWasmI32>(
701             [=](LiftoffRegister dst, LiftoffRegister src) {
702               __ emit_i64_eqz(dst.gp(), src);
703             });
704         break;
705       default:
706         return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
707     }
708 #undef CASE_I32_UNOP
709 #undef CASE_FLOAT_UNOP
710 #undef CASE_FLOAT_UNOP_WITH_CFALLBACK
711 #undef CASE_TYPE_CONVERSION
712   }
713 
714   template <ValueType src_type, ValueType result_type, typename EmitFn>
715   void EmitBinOp(EmitFn fn) {
716     static constexpr RegClass src_rc = reg_class_for(src_type);
717     static constexpr RegClass result_rc = reg_class_for(result_type);
718     LiftoffRegister rhs = __ PopToRegister();
719     LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
720     LiftoffRegister dst = src_rc == result_rc
721                               ? __ GetUnusedRegister(result_rc, {lhs, rhs})
722                               : __ GetUnusedRegister(result_rc);
723     fn(dst, lhs, rhs);
724     __ PushRegister(result_type, dst);
725   }
726 
727   void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs,
728                            LiftoffRegister rhs, ExternalReference ext_ref,
729                            Label* trap_by_zero,
730                            Label* trap_unrepresentable = nullptr) {
731     // Cannot emit native instructions, build C call.
732     LiftoffRegister ret =
733         __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
734     LiftoffRegister tmp =
735         __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, ret));
736     LiftoffRegister arg_regs[] = {lhs, rhs};
737     LiftoffRegister result_regs[] = {ret, dst};
738     ValueType sig_types[] = {kWasmI32, kWasmI64, kWasmI64};
739     // <i64, i64> -> i32 (with i64 output argument)
740     FunctionSig sig(1, 2, sig_types);
741     GenerateCCall(result_regs, &sig, kWasmI64, arg_regs, ext_ref);
742     __ LoadConstant(tmp, WasmValue(int32_t{0}));
743     __ emit_cond_jump(kEqual, trap_by_zero, kWasmI32, ret.gp(), tmp.gp());
744     if (trap_unrepresentable) {
745       __ LoadConstant(tmp, WasmValue(int32_t{-1}));
746       __ emit_cond_jump(kEqual, trap_unrepresentable, kWasmI32, ret.gp(),
747                         tmp.gp());
748     }
749   }
750 
751   void BinOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig*,
752              const Value& lhs, const Value& rhs, Value* result) {
753 #define CASE_I32_BINOP(opcode, fn)                                           \
754   case WasmOpcode::kExpr##opcode:                                            \
755     return EmitBinOp<kWasmI32, kWasmI32>(                                    \
756         [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
757           __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp());                        \
758         });
759 #define CASE_I64_BINOP(opcode, fn)                                           \
760   case WasmOpcode::kExpr##opcode:                                            \
761     return EmitBinOp<kWasmI64, kWasmI64>(                                    \
762         [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
763           __ emit_##fn(dst, lhs, rhs);                                       \
764         });
765 #define CASE_FLOAT_BINOP(opcode, type, fn)                                   \
766   case WasmOpcode::kExpr##opcode:                                            \
767     return EmitBinOp<kWasm##type, kWasm##type>(                              \
768         [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
769           __ emit_##fn(dst.fp(), lhs.fp(), rhs.fp());                        \
770         });
771 #define CASE_I32_CMPOP(opcode, cond)                                         \
772   case WasmOpcode::kExpr##opcode:                                            \
773     return EmitBinOp<kWasmI32, kWasmI32>(                                    \
774         [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
775           __ emit_i32_set_cond(cond, dst.gp(), lhs.gp(), rhs.gp());          \
776         });
777 #define CASE_I64_CMPOP(opcode, cond)                                         \
778   case WasmOpcode::kExpr##opcode:                                            \
779     return EmitBinOp<kWasmI64, kWasmI32>(                                    \
780         [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
781           __ emit_i64_set_cond(cond, dst.gp(), lhs, rhs);                    \
782         });
783 #define CASE_F32_CMPOP(opcode, cond)                                         \
784   case WasmOpcode::kExpr##opcode:                                            \
785     return EmitBinOp<kWasmF32, kWasmI32>(                                    \
786         [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
787           __ emit_f32_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp());          \
788         });
789 #define CASE_F64_CMPOP(opcode, cond)                                         \
790   case WasmOpcode::kExpr##opcode:                                            \
791     return EmitBinOp<kWasmF64, kWasmI32>(                                    \
792         [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
793           __ emit_f64_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp());          \
794         });
795 #define CASE_I32_SHIFTOP(opcode, fn)                                         \
796   case WasmOpcode::kExpr##opcode:                                            \
797     return EmitBinOp<kWasmI32, kWasmI32>(                                    \
798         [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
799           __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp(), {});                    \
800         });
801 #define CASE_I64_SHIFTOP(opcode, fn)                                           \
802   case WasmOpcode::kExpr##opcode:                                              \
803     return EmitBinOp<kWasmI64, kWasmI64>([=](LiftoffRegister dst,              \
804                                              LiftoffRegister src,              \
805                                              LiftoffRegister amount) {         \
806       __ emit_##fn(dst, src, amount.is_pair() ? amount.low_gp() : amount.gp(), \
807                    {});                                                        \
808     });
809 #define CASE_CCALL_BINOP(opcode, type, ext_ref_fn)                           \
810   case WasmOpcode::kExpr##opcode:                                            \
811     return EmitBinOp<kWasmI32, kWasmI32>(                                    \
812         [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
813           LiftoffRegister args[] = {lhs, rhs};                               \
814           auto ext_ref = ExternalReference::ext_ref_fn();                    \
815           ValueType sig_i_ii_reps[] = {kWasmI32, kWasmI32, kWasmI32};        \
816           FunctionSig sig_i_ii(1, 2, sig_i_ii_reps);                         \
817           GenerateCCall(&dst, &sig_i_ii, kWasmStmt, args, ext_ref);          \
818         });
819     switch (opcode) {
820       CASE_I32_BINOP(I32Add, i32_add)
821       CASE_I32_BINOP(I32Sub, i32_sub)
822       CASE_I32_BINOP(I32Mul, i32_mul)
823       CASE_I32_BINOP(I32And, i32_and)
824       CASE_I32_BINOP(I32Ior, i32_or)
825       CASE_I32_BINOP(I32Xor, i32_xor)
826       CASE_I64_BINOP(I64And, i64_and)
827       CASE_I64_BINOP(I64Ior, i64_or)
828       CASE_I64_BINOP(I64Xor, i64_xor)
829       CASE_I32_CMPOP(I32Eq, kEqual)
830       CASE_I32_CMPOP(I32Ne, kUnequal)
831       CASE_I32_CMPOP(I32LtS, kSignedLessThan)
832       CASE_I32_CMPOP(I32LtU, kUnsignedLessThan)
833       CASE_I32_CMPOP(I32GtS, kSignedGreaterThan)
834       CASE_I32_CMPOP(I32GtU, kUnsignedGreaterThan)
835       CASE_I32_CMPOP(I32LeS, kSignedLessEqual)
836       CASE_I32_CMPOP(I32LeU, kUnsignedLessEqual)
837       CASE_I32_CMPOP(I32GeS, kSignedGreaterEqual)
838       CASE_I32_CMPOP(I32GeU, kUnsignedGreaterEqual)
839       CASE_I64_BINOP(I64Add, i64_add)
840       CASE_I64_BINOP(I64Sub, i64_sub)
841       CASE_I64_BINOP(I64Mul, i64_mul)
842       CASE_I64_CMPOP(I64Eq, kEqual)
843       CASE_I64_CMPOP(I64Ne, kUnequal)
844       CASE_I64_CMPOP(I64LtS, kSignedLessThan)
845       CASE_I64_CMPOP(I64LtU, kUnsignedLessThan)
846       CASE_I64_CMPOP(I64GtS, kSignedGreaterThan)
847       CASE_I64_CMPOP(I64GtU, kUnsignedGreaterThan)
848       CASE_I64_CMPOP(I64LeS, kSignedLessEqual)
849       CASE_I64_CMPOP(I64LeU, kUnsignedLessEqual)
850       CASE_I64_CMPOP(I64GeS, kSignedGreaterEqual)
851       CASE_I64_CMPOP(I64GeU, kUnsignedGreaterEqual)
852       CASE_F32_CMPOP(F32Eq, kEqual)
853       CASE_F32_CMPOP(F32Ne, kUnequal)
854       CASE_F32_CMPOP(F32Lt, kUnsignedLessThan)
855       CASE_F32_CMPOP(F32Gt, kUnsignedGreaterThan)
856       CASE_F32_CMPOP(F32Le, kUnsignedLessEqual)
857       CASE_F32_CMPOP(F32Ge, kUnsignedGreaterEqual)
858       CASE_F64_CMPOP(F64Eq, kEqual)
859       CASE_F64_CMPOP(F64Ne, kUnequal)
860       CASE_F64_CMPOP(F64Lt, kUnsignedLessThan)
861       CASE_F64_CMPOP(F64Gt, kUnsignedGreaterThan)
862       CASE_F64_CMPOP(F64Le, kUnsignedLessEqual)
863       CASE_F64_CMPOP(F64Ge, kUnsignedGreaterEqual)
864       CASE_I32_SHIFTOP(I32Shl, i32_shl)
865       CASE_I32_SHIFTOP(I32ShrS, i32_sar)
866       CASE_I32_SHIFTOP(I32ShrU, i32_shr)
867       CASE_I64_SHIFTOP(I64Shl, i64_shl)
868       CASE_I64_SHIFTOP(I64ShrS, i64_sar)
869       CASE_I64_SHIFTOP(I64ShrU, i64_shr)
870       CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
871       CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
872       CASE_FLOAT_BINOP(F32Add, F32, f32_add)
873       CASE_FLOAT_BINOP(F32Sub, F32, f32_sub)
874       CASE_FLOAT_BINOP(F32Mul, F32, f32_mul)
875       CASE_FLOAT_BINOP(F32Div, F32, f32_div)
876       CASE_FLOAT_BINOP(F32Min, F32, f32_min)
877       CASE_FLOAT_BINOP(F32Max, F32, f32_max)
878       CASE_FLOAT_BINOP(F64Add, F64, f64_add)
879       CASE_FLOAT_BINOP(F64Sub, F64, f64_sub)
880       CASE_FLOAT_BINOP(F64Mul, F64, f64_mul)
881       CASE_FLOAT_BINOP(F64Div, F64, f64_div)
882       CASE_FLOAT_BINOP(F64Min, F64, f64_min)
883       CASE_FLOAT_BINOP(F64Max, F64, f64_max)
884       case WasmOpcode::kExprI32DivS:
885         EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
886                                                       LiftoffRegister lhs,
887                                                       LiftoffRegister rhs) {
888           WasmCodePosition position = decoder->position();
889           AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero);
890           // Adding the second trap might invalidate the pointer returned for
891           // the first one, thus get both pointers afterwards.
892           AddOutOfLineTrap(position,
893                            WasmCode::kThrowWasmTrapDivUnrepresentable);
894           Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
895           Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
896           __ emit_i32_divs(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero,
897                            div_unrepresentable);
898         });
899         break;
900       case WasmOpcode::kExprI32DivU:
901         EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
902                                                       LiftoffRegister lhs,
903                                                       LiftoffRegister rhs) {
904           Label* div_by_zero = AddOutOfLineTrap(
905               decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
906           __ emit_i32_divu(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero);
907         });
908         break;
909       case WasmOpcode::kExprI32RemS:
910         EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
911                                                       LiftoffRegister lhs,
912                                                       LiftoffRegister rhs) {
913           Label* rem_by_zero = AddOutOfLineTrap(
914               decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
915           __ emit_i32_rems(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
916         });
917         break;
918       case WasmOpcode::kExprI32RemU:
919         EmitBinOp<kWasmI32, kWasmI32>([this, decoder](LiftoffRegister dst,
920                                                       LiftoffRegister lhs,
921                                                       LiftoffRegister rhs) {
922           Label* rem_by_zero = AddOutOfLineTrap(
923               decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
924           __ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
925         });
926         break;
927       case WasmOpcode::kExprI64DivS:
928         EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
929                                                       LiftoffRegister lhs,
930                                                       LiftoffRegister rhs) {
931           WasmCodePosition position = decoder->position();
932           AddOutOfLineTrap(position, WasmCode::kThrowWasmTrapDivByZero);
933           // Adding the second trap might invalidate the pointer returned for
934           // the first one, thus get both pointers afterwards.
935           AddOutOfLineTrap(position,
936                            WasmCode::kThrowWasmTrapDivUnrepresentable);
937           Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
938           Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
939           if (!__ emit_i64_divs(dst, lhs, rhs, div_by_zero,
940                                 div_unrepresentable)) {
941             ExternalReference ext_ref = ExternalReference::wasm_int64_div();
942             EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero,
943                                 div_unrepresentable);
944           }
945         });
946         break;
947       case WasmOpcode::kExprI64DivU:
948         EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
949                                                       LiftoffRegister lhs,
950                                                       LiftoffRegister rhs) {
951           Label* div_by_zero = AddOutOfLineTrap(
952               decoder->position(), WasmCode::kThrowWasmTrapDivByZero);
953           if (!__ emit_i64_divu(dst, lhs, rhs, div_by_zero)) {
954             ExternalReference ext_ref = ExternalReference::wasm_uint64_div();
955             EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero);
956           }
957         });
958         break;
959       case WasmOpcode::kExprI64RemS:
960         EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
961                                                       LiftoffRegister lhs,
962                                                       LiftoffRegister rhs) {
963           Label* rem_by_zero = AddOutOfLineTrap(
964               decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
965           if (!__ emit_i64_rems(dst, lhs, rhs, rem_by_zero)) {
966             ExternalReference ext_ref = ExternalReference::wasm_int64_mod();
967             EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
968           }
969         });
970         break;
971       case WasmOpcode::kExprI64RemU:
972         EmitBinOp<kWasmI64, kWasmI64>([this, decoder](LiftoffRegister dst,
973                                                       LiftoffRegister lhs,
974                                                       LiftoffRegister rhs) {
975           Label* rem_by_zero = AddOutOfLineTrap(
976               decoder->position(), WasmCode::kThrowWasmTrapRemByZero);
977           if (!__ emit_i64_remu(dst, lhs, rhs, rem_by_zero)) {
978             ExternalReference ext_ref = ExternalReference::wasm_uint64_mod();
979             EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
980           }
981         });
982         break;
983       default:
984         return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
985     }
986 #undef CASE_I32_BINOP
987 #undef CASE_I64_BINOP
988 #undef CASE_FLOAT_BINOP
989 #undef CASE_I32_CMPOP
990 #undef CASE_I64_CMPOP
991 #undef CASE_F32_CMPOP
992 #undef CASE_F64_CMPOP
993 #undef CASE_I32_SHIFTOP
994 #undef CASE_I64_SHIFTOP
995 #undef CASE_CCALL_BINOP
996   }
997 
998   void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
999     __ cache_state()->stack_state.emplace_back(kWasmI32, value);
1000   }
1001 
1002   void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
1003     // The {VarState} stores constant values as int32_t, thus we only store
1004     // 64-bit constants in this field if it fits in an int32_t. Larger values
1005     // cannot be used as immediate value anyway, so we can also just put them in
1006     // a register immediately.
1007     int32_t value_i32 = static_cast<int32_t>(value);
1008     if (value_i32 == value) {
1009       __ cache_state()->stack_state.emplace_back(kWasmI64, value_i32);
1010     } else {
1011       LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64));
1012       __ LoadConstant(reg, WasmValue(value));
1013       __ PushRegister(kWasmI64, reg);
1014     }
1015   }
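  // For example, i64.const 17 stays in the stack state as a constant, while
  // i64.const 0x100000000 (not representable as int32_t) is materialized into
  // a register via LoadConstant right away.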
1016 
1017   void F32Const(FullDecoder* decoder, Value* result, float value) {
1018     LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
1019     __ LoadConstant(reg, WasmValue(value));
1020     __ PushRegister(kWasmF32, reg);
1021   }
1022 
1023   void F64Const(FullDecoder* decoder, Value* result, double value) {
1024     LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
1025     __ LoadConstant(reg, WasmValue(value));
1026     __ PushRegister(kWasmF64, reg);
1027   }
1028 
1029   void RefNull(FullDecoder* decoder, Value* result) {
1030     unsupported(decoder, "ref_null");
1031   }
1032 
1033   void Drop(FullDecoder* decoder, const Value& value) {
1034     auto& slot = __ cache_state()->stack_state.back();
1035     // If the dropped slot contains a register, decrement its use count.
1036     if (slot.is_reg()) __ cache_state()->dec_used(slot.reg());
1037     __ cache_state()->stack_state.pop_back();
1038   }
1039 
1040   void DoReturn(FullDecoder* decoder, Vector<Value> values, bool implicit) {
1041     if (implicit) {
1042       DCHECK_EQ(1, decoder->control_depth());
1043       Control* func_block = decoder->control_at(0);
1044       __ bind(func_block->label.get());
1045       __ cache_state()->Steal(func_block->label_state);
1046     }
1047     if (!values.is_empty()) {
1048       if (values.size() > 1) return unsupported(decoder, "multi-return");
1049       LiftoffRegister reg = __ PopToRegister();
1050       LiftoffRegister return_reg =
1051           kNeedI64RegPair && values[0].type == kWasmI64
1052               ? LiftoffRegister::ForPair(kGpReturnRegisters[0],
1053                                          kGpReturnRegisters[1])
1054               : reg_class_for(values[0].type) == kGpReg
1055                     ? LiftoffRegister(kGpReturnRegisters[0])
1056                     : LiftoffRegister(kFpReturnRegisters[0]);
1057       if (reg != return_reg) __ Move(return_reg, reg, values[0].type);
1058     }
1059     __ LeaveFrame(StackFrame::WASM_COMPILED);
1060     __ DropStackSlotsAndRet(
1061         static_cast<uint32_t>(descriptor_->StackParameterCount()));
1062   }
1063 
1064   void GetLocal(FullDecoder* decoder, Value* result,
1065                 const LocalIndexImmediate<validate>& imm) {
1066     auto& slot = __ cache_state()->stack_state[imm.index];
1067     DCHECK_EQ(slot.type(), imm.type);
1068     switch (slot.loc()) {
1069       case kRegister:
1070         __ PushRegister(slot.type(), slot.reg());
1071         break;
1072       case KIntConst:
1073         __ cache_state()->stack_state.emplace_back(imm.type, slot.i32_const());
1074         break;
1075       case kStack: {
1076         auto rc = reg_class_for(imm.type);
1077         LiftoffRegister reg = __ GetUnusedRegister(rc);
1078         __ Fill(reg, imm.index, imm.type);
1079         __ PushRegister(slot.type(), reg);
1080         break;
1081       }
1082     }
1083   }
1084 
1085   void SetLocalFromStackSlot(LiftoffAssembler::VarState& dst_slot,
1086                              uint32_t local_index) {
1087     auto& state = *__ cache_state();
1088     ValueType type = dst_slot.type();
1089     if (dst_slot.is_reg()) {
1090       LiftoffRegister slot_reg = dst_slot.reg();
1091       if (state.get_use_count(slot_reg) == 1) {
1092         __ Fill(dst_slot.reg(), state.stack_height() - 1, type);
1093         return;
1094       }
1095       state.dec_used(slot_reg);
1096       dst_slot.MakeStack();
1097     }
1098     DCHECK_EQ(type, __ local_type(local_index));
1099     RegClass rc = reg_class_for(type);
1100     LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
1101     __ Fill(dst_reg, __ cache_state()->stack_height() - 1, type);
1102     dst_slot = LiftoffAssembler::VarState(type, dst_reg);
1103     __ cache_state()->inc_used(dst_reg);
1104   }
1105 
1106   void SetLocal(uint32_t local_index, bool is_tee) {
1107     auto& state = *__ cache_state();
1108     auto& source_slot = state.stack_state.back();
1109     auto& target_slot = state.stack_state[local_index];
1110     switch (source_slot.loc()) {
1111       case kRegister:
1112         if (target_slot.is_reg()) state.dec_used(target_slot.reg());
1113         target_slot = source_slot;
1114         if (is_tee) state.inc_used(target_slot.reg());
1115         break;
1116       case KIntConst:
1117         if (target_slot.is_reg()) state.dec_used(target_slot.reg());
1118         target_slot = source_slot;
1119         break;
1120       case kStack:
1121         SetLocalFromStackSlot(target_slot, local_index);
1122         break;
1123     }
1124     if (!is_tee) __ cache_state()->stack_state.pop_back();
1125   }
1126 
1127   void SetLocal(FullDecoder* decoder, const Value& value,
1128                 const LocalIndexImmediate<validate>& imm) {
1129     SetLocal(imm.index, false);
1130   }
1131 
1132   void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
1133                 const LocalIndexImmediate<validate>& imm) {
1134     SetLocal(imm.index, true);
1135   }
1136 
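  // Computes the base address and offset for accessing {global}: imported
  // mutable globals are reached indirectly through the ImportedMutableGlobals
  // address array, while all other globals live at a constant offset from
  // GlobalsStart (see the two branches below).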
1137   LiftoffRegister GetGlobalBaseAndOffset(const WasmGlobal* global,
1138                                          LiftoffRegList& pinned,
1139                                          uint32_t* offset) {
1140     LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg));
1141     if (global->mutability && global->imported) {
1142       LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kPointerLoadType);
1143       __ Load(addr, addr.gp(), no_reg, global->index * sizeof(Address),
1144               kPointerLoadType, pinned);
1145       *offset = 0;
1146     } else {
1147       LOAD_INSTANCE_FIELD(addr, GlobalsStart, kPointerLoadType);
1148       *offset = global->offset;
1149     }
1150     return addr;
1151   }
1152 
1153   void GetGlobal(FullDecoder* decoder, Value* result,
1154                  const GlobalIndexImmediate<validate>& imm) {
1155     const auto* global = &env_->module->globals[imm.index];
1156     if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global"))
1157       return;
1158     LiftoffRegList pinned;
1159     uint32_t offset = 0;
1160     LiftoffRegister addr = GetGlobalBaseAndOffset(global, pinned, &offset);
1161     LiftoffRegister value =
1162         pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
1163     LoadType type = LoadType::ForValueType(global->type);
1164     __ Load(value, addr.gp(), no_reg, offset, type, pinned);
1165     __ PushRegister(global->type, value);
1166   }
1167 
1168   void SetGlobal(FullDecoder* decoder, const Value& value,
1169                  const GlobalIndexImmediate<validate>& imm) {
1170     auto* global = &env_->module->globals[imm.index];
1171     if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global"))
1172       return;
1173     LiftoffRegList pinned;
1174     uint32_t offset = 0;
1175     LiftoffRegister addr = GetGlobalBaseAndOffset(global, pinned, &offset);
1176     LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
1177     StoreType type = StoreType::ForValueType(global->type);
1178     __ Store(addr.gp(), no_reg, offset, reg, type, pinned);
1179   }
1180 
1181   void Unreachable(FullDecoder* decoder) {
1182     Label* unreachable_label = AddOutOfLineTrap(
1183         decoder->position(), WasmCode::kThrowWasmTrapUnreachable);
1184     __ emit_jump(unreachable_label);
1185     __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
1186   }
1187 
1188   void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
1189               const Value& tval, Value* result) {
1190     LiftoffRegList pinned;
1191     Register condition = pinned.set(__ PopToRegister()).gp();
1192     ValueType type = __ cache_state()->stack_state.end()[-1].type();
1193     DCHECK_EQ(type, __ cache_state()->stack_state.end()[-2].type());
1194     LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
1195     LiftoffRegister true_value = __ PopToRegister(pinned);
1196     LiftoffRegister dst =
1197         __ GetUnusedRegister(true_value.reg_class(), {true_value, false_value});
1198     __ PushRegister(type, dst);
1199 
1200     // Now emit the actual code to move either {true_value} or {false_value}
1201     // into {dst}.
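    // (The single-register form of emit_cond_jump used here presumably
    // compares {condition} against zero, so kEqual selects the false case.)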
1202     Label cont;
1203     Label case_false;
1204     __ emit_cond_jump(kEqual, &case_false, kWasmI32, condition);
1205     if (dst != true_value) __ Move(dst, true_value, type);
1206     __ emit_jump(&cont);
1207 
1208     __ bind(&case_false);
1209     if (dst != false_value) __ Move(dst, false_value, type);
1210     __ bind(&cont);
1211   }
1212 
1213   void Br(Control* target) {
1214     if (!target->br_merge()->reached) {
1215       target->label_state.InitMerge(*__ cache_state(), __ num_locals(),
1216                                     target->br_merge()->arity);
1217     }
1218     __ MergeStackWith(target->label_state, target->br_merge()->arity);
1219     __ jmp(target->label.get());
1220   }
1221 
1222   void Br(FullDecoder* decoder, Control* target) { Br(target); }
1223 
1224   void BrIf(FullDecoder* decoder, const Value& cond, Control* target) {
1225     Label cont_false;
1226     Register value = __ PopToRegister().gp();
1227     __ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
1228 
1229     Br(target);
1230     __ bind(&cont_false);
1231   }
1232 
1233   // Generate a branch table case, potentially reusing previously generated
1234   // stack transfer code.
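  // {br_targets} maps a branch depth to its label, so several table entries
  // targeting the same depth share one stack-transfer-and-jump sequence
  // instead of emitting it again.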
1235   void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth,
1236                       std::map<uint32_t, MovableLabel>& br_targets) {
1237     MovableLabel& label = br_targets[br_depth];
1238     if (label.get()->is_bound()) {
1239       __ jmp(label.get());
1240     } else {
1241       __ bind(label.get());
1242       Br(decoder->control_at(br_depth));
1243     }
1244   }
1245 
1246   // Generate a branch table for input in [min, max).
1247   // TODO(wasm): Generate a real branch table (like TF TableSwitch).
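  // Until then, the recursion below emits a balanced binary search: each level
  // compares {value} against the midpoint of [min, max) and recurses into one
  // half, so an N-entry table needs at most ceil(log2(N)) comparisons before
  // reaching a GenerateBrCase leaf (e.g. 3 comparisons for 8 entries).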
1248   void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp,
1249                        LiftoffRegister value, uint32_t min, uint32_t max,
1250                        BranchTableIterator<validate>& table_iterator,
1251                        std::map<uint32_t, MovableLabel>& br_targets) {
1252     DCHECK_LT(min, max);
1253     // Check base case.
1254     if (max == min + 1) {
1255       DCHECK_EQ(min, table_iterator.cur_index());
1256       GenerateBrCase(decoder, table_iterator.next(), br_targets);
1257       return;
1258     }
1259 
1260     uint32_t split = min + (max - min) / 2;
1261     Label upper_half;
1262     __ LoadConstant(tmp, WasmValue(split));
1263     __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kWasmI32, value.gp(),
1264                       tmp.gp());
1265     // Emit br table for lower half:
1266     GenerateBrTable(decoder, tmp, value, min, split, table_iterator,
1267                     br_targets);
1268     __ bind(&upper_half);
1269     // Emit br table for upper half:
1270     GenerateBrTable(decoder, tmp, value, split, max, table_iterator,
1271                     br_targets);
1272   }
1273 
1274   void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
1275                const Value& key) {
1276     LiftoffRegList pinned;
1277     LiftoffRegister value = pinned.set(__ PopToRegister());
1278     BranchTableIterator<validate> table_iterator(decoder, imm);
1279     std::map<uint32_t, MovableLabel> br_targets;
1280 
1281     if (imm.table_count > 0) {
1282       LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
1283       __ LoadConstant(tmp, WasmValue(uint32_t{imm.table_count}));
1284       Label case_default;
1285       __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kWasmI32,
1286                         value.gp(), tmp.gp());
1287 
1288       GenerateBrTable(decoder, tmp, value, 0, imm.table_count, table_iterator,
1289                       br_targets);
1290 
1291       __ bind(&case_default);
1292     }
1293 
1294     // Generate the default case.
1295     GenerateBrCase(decoder, table_iterator.next(), br_targets);
1296     DCHECK(!table_iterator.has_next());
1297   }
1298 
1299   void Else(FullDecoder* decoder, Control* if_block) {
1300     if (if_block->reachable()) __ emit_jump(if_block->label.get());
1301     __ bind(if_block->else_state->label.get());
1302     __ cache_state()->Steal(if_block->else_state->state);
1303   }
1304 
1305   Label* AddOutOfLineTrap(WasmCodePosition position,
1306                           WasmCode::RuntimeStubId stub, uint32_t pc = 0) {
1307     DCHECK(!FLAG_wasm_no_bounds_checks);
1308     // The pc is needed for the memory out-of-bounds trap when the trap handler
1309     // is enabled. Other callers should not even compute it.
1310     DCHECK_EQ(pc != 0, stub == WasmCode::kThrowWasmTrapMemOutOfBounds &&
1311                            env_->use_trap_handler);
1312 
1313     out_of_line_code_.push_back(OutOfLineCode::Trap(stub, position, pc));
1314     return out_of_line_code_.back().label.get();
1315   }
1316 
1317   // Returns true if the memory access is statically known to be out of bounds
1318   // (a jump to the trap has already been emitted then); returns false otherwise.
1319   bool BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
1320                       uint32_t offset, Register index, LiftoffRegList pinned) {
1321     const bool statically_oob = access_size > env_->max_memory_size ||
1322                                 offset > env_->max_memory_size - access_size;
1323 
1324     if (!statically_oob &&
1325         (FLAG_wasm_no_bounds_checks || env_->use_trap_handler)) {
1326       return false;
1327     }
1328 
1329     // TODO(wasm): This adds protected instruction information for the jump
1330     // instruction we are about to generate. It would be better to just not add
1331     // protected instruction info when the pc is 0.
1332     Label* trap_label = AddOutOfLineTrap(
1333         decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds,
1334         env_->use_trap_handler ? __ pc_offset() : 0);
1335 
1336     if (statically_oob) {
1337       __ emit_jump(trap_label);
1338       Control* current_block = decoder->control_at(0);
1339       if (current_block->reachable()) {
1340         current_block->reachability = kSpecOnlyReachable;
1341       }
1342       return true;
1343     }
1344 
1345     DCHECK(!env_->use_trap_handler);
1346     DCHECK(!FLAG_wasm_no_bounds_checks);
1347 
1348     uint64_t end_offset = uint64_t{offset} + access_size - 1u;
1349 
1350     // If the end offset is larger than the smallest declared memory size,
1351     // dynamically check it against the actual memory size, which is not known
1352     // at compile time. Otherwise, only the index check below is required.
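    // Example (illustrative values only): for a 4-byte access at offset 16,
    // end_offset is 19; with mem_size == 65536 the effective size computed
    // below is 65517, so any index <= 65516 passes, since its last accessed
    // byte (index + 19) still lies within memory.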
1353     LiftoffRegister end_offset_reg =
1354         pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1355     LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
1356     LOAD_INSTANCE_FIELD(mem_size, MemorySize, kPointerLoadType);
1357 
1358     if (kPointerSize == 8) {
1359       __ LoadConstant(end_offset_reg, WasmValue(end_offset));
1360     } else {
1361       __ LoadConstant(end_offset_reg,
1362                       WasmValue(static_cast<uint32_t>(end_offset)));
1363     }
1364 
1365     if (end_offset >= env_->min_memory_size) {
1366       __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
1367                         LiftoffAssembler::kWasmIntPtr, end_offset_reg.gp(),
1368                         mem_size.gp());
1369     }
1370 
1371     // Just reuse the end_offset register for computing the effective size.
1372     LiftoffRegister effective_size_reg = end_offset_reg;
1373     __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size.gp(),
1374                         end_offset_reg.gp());
1375 
1376     __ emit_i32_to_intptr(index, index);
1377 
1378     __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
1379                       LiftoffAssembler::kWasmIntPtr, index,
1380                       effective_size_reg.gp());
1381     return false;
1382   }
1383 
1384   void TraceMemoryOperation(bool is_store, MachineRepresentation rep,
1385                             Register index, uint32_t offset,
1386                             WasmCodePosition position) {
1387     // Before making the runtime call, spill all cache registers.
1388     __ SpillAllRegisters();
1389 
1390     LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
1391     // Get one register for computing the address (offset + index).
1392     LiftoffRegister address = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1393     // Compute offset+index in address.
1394     __ LoadConstant(address, WasmValue(offset));
1395     __ emit_i32_add(address.gp(), address.gp(), index);
1396 
1397     // Get a register to hold the address of the MemoryTracingInfo stack slot.
1398     LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1399     // Allocate stack slot for MemoryTracingInfo.
1400     __ AllocateStackSlot(info.gp(), sizeof(MemoryTracingInfo));
1401 
1402     // Now store all information into the MemoryTracingInfo struct.
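    // (The three stores below assume MemoryTracingInfo consists of an i32
    // {address} field and byte-sized {is_store} and {mem_rep} fields; the
    // offsets themselves are taken from offsetof, i.e. from the struct.)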
1403     __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, address), address,
1404              StoreType::kI32Store, pinned);
1405     __ LoadConstant(address, WasmValue(is_store ? 1 : 0));
1406     __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), address,
1407              StoreType::kI32Store8, pinned);
1408     __ LoadConstant(address, WasmValue(static_cast<int>(rep)));
1409     __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), address,
1410              StoreType::kI32Store8, pinned);
1411 
1412     source_position_table_builder_.AddPosition(__ pc_offset(),
1413                                                SourcePosition(position), false);
1414 
1415     Register args[] = {info.gp()};
1416     GenerateRuntimeCall(Runtime::kWasmTraceMemory, arraysize(args), args);
1417     __ DeallocateStackSlot(sizeof(MemoryTracingInfo));
1418   }
1419 
1420   void GenerateRuntimeCall(Runtime::FunctionId runtime_function, int num_args,
1421                            Register* args) {
1422     auto call_descriptor = compiler::Linkage::GetRuntimeCallDescriptor(
1423         compilation_zone_, runtime_function, num_args,
1424         compiler::Operator::kNoProperties, compiler::CallDescriptor::kNoFlags);
1425     // Currently, only one argument is supported. Supporting more arguments
1426     // requires handling the parallel register moves (reuse StackTransferRecipe).
1427     DCHECK_EQ(1, num_args);
1428     constexpr size_t kInputShift = 1;  // Input 0 is the call target.
1429     compiler::LinkageLocation param_loc =
1430         call_descriptor->GetInputLocation(kInputShift);
1431     if (param_loc.IsRegister()) {
1432       Register reg = Register::from_code(param_loc.AsRegister());
1433       __ Move(LiftoffRegister(reg), LiftoffRegister(args[0]),
1434               LiftoffAssembler::kWasmIntPtr);
1435     } else {
1436       DCHECK(param_loc.IsCallerFrameSlot());
1437       LiftoffStackSlots stack_slots(&asm_);
1438       stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr,
1439                                                  LiftoffRegister(args[0])));
1440       stack_slots.Construct();
1441     }
1442 
1443     // Set context to zero (Smi::kZero) for the runtime call.
1444     __ TurboAssembler::Move(kContextRegister, Smi::kZero);
1445     LiftoffRegister centry(kJavaScriptCallCodeStartRegister);
1446     LOAD_INSTANCE_FIELD(centry, CEntryStub, kPointerLoadType);
1447     __ CallRuntimeWithCEntry(runtime_function, centry.gp());
1448     safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
1449                                              Safepoint::kNoLazyDeopt);
1450   }
1451 
1452   LiftoffRegister AddMemoryMasking(LiftoffRegister index, uint32_t* offset,
1453                                    LiftoffRegList& pinned) {
1454     if (!FLAG_untrusted_code_mitigations || env_->use_trap_handler) {
1455       return index;
1456     }
1457     DEBUG_CODE_COMMENT("Mask memory index");
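    // With untrusted-code mitigations enabled and no trap handler, clamp the
    // access: compute index + offset and AND it with the per-instance memory
    // mask (presumably a power-of-two-minus-one covering the memory), so even
    // a misspeculated access cannot reach far outside the memory region.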
1458     // Make sure that we can overwrite {index}.
1459     if (__ cache_state()->is_used(index)) {
1460       LiftoffRegister old_index = index;
1461       pinned.clear(old_index);
1462       index = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1463       if (index != old_index) __ Move(index.gp(), old_index.gp(), kWasmI32);
1464     }
1465     LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
1466     __ LoadConstant(tmp, WasmValue(*offset));
1467     __ emit_i32_add(index.gp(), index.gp(), tmp.gp());
1468     LOAD_INSTANCE_FIELD(tmp, MemoryMask, LoadType::kI32Load);
1469     __ emit_i32_and(index.gp(), index.gp(), tmp.gp());
1470     *offset = 0;
1471     return index;
1472   }
1473 
1474   void LoadMem(FullDecoder* decoder, LoadType type,
1475                const MemoryAccessImmediate<validate>& imm,
1476                const Value& index_val, Value* result) {
1477     ValueType value_type = type.value_type();
1478     if (!CheckSupportedType(decoder, kTypes_ilfd, value_type, "load")) return;
1479     LiftoffRegList pinned;
1480     LiftoffRegister index = pinned.set(__ PopToRegister());
1481     if (BoundsCheckMem(decoder, type.size(), imm.offset, index.gp(), pinned)) {
1482       return;
1483     }
1484     uint32_t offset = imm.offset;
1485     index = AddMemoryMasking(index, &offset, pinned);
1486     DEBUG_CODE_COMMENT("Load from memory");
1487     LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1488     LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerLoadType);
1489     RegClass rc = reg_class_for(value_type);
1490     LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
1491     uint32_t protected_load_pc = 0;
1492     __ Load(value, addr.gp(), index.gp(), offset, type, pinned,
1493             &protected_load_pc, true);
1494     if (env_->use_trap_handler) {
1495       AddOutOfLineTrap(decoder->position(),
1496                        WasmCode::kThrowWasmTrapMemOutOfBounds,
1497                        protected_load_pc);
1498     }
1499     __ PushRegister(value_type, value);
1500 
1501     if (FLAG_wasm_trace_memory) {
1502       TraceMemoryOperation(false, type.mem_type().representation(), index.gp(),
1503                            offset, decoder->position());
1504     }
1505   }
1506 
1507   void StoreMem(FullDecoder* decoder, StoreType type,
1508                 const MemoryAccessImmediate<validate>& imm,
1509                 const Value& index_val, const Value& value_val) {
1510     ValueType value_type = type.value_type();
1511     if (!CheckSupportedType(decoder, kTypes_ilfd, value_type, "store")) return;
1512     LiftoffRegList pinned;
1513     LiftoffRegister value = pinned.set(__ PopToRegister());
1514     LiftoffRegister index = pinned.set(__ PopToRegister(pinned));
1515     if (BoundsCheckMem(decoder, type.size(), imm.offset, index.gp(), pinned)) {
1516       return;
1517     }
1518     uint32_t offset = imm.offset;
1519     index = AddMemoryMasking(index, &offset, pinned);
1520     DEBUG_CODE_COMMENT("Store to memory");
1521     LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1522     LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerLoadType);
1523     uint32_t protected_store_pc = 0;
1524     __ Store(addr.gp(), index.gp(), offset, value, type, pinned,
1525              &protected_store_pc, true);
1526     if (env_->use_trap_handler) {
1527       AddOutOfLineTrap(decoder->position(),
1528                        WasmCode::kThrowWasmTrapMemOutOfBounds,
1529                        protected_store_pc);
1530     }
1531     if (FLAG_wasm_trace_memory) {
1532       TraceMemoryOperation(true, type.mem_rep(), index.gp(), offset,
1533                            decoder->position());
1534     }
1535   }
1536 
1537   void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
1538     LiftoffRegList pinned;
1539     LiftoffRegister mem_size = pinned.set(__ GetUnusedRegister(kGpReg));
1540     LiftoffRegister tmp_const =
1541         pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1542     LOAD_INSTANCE_FIELD(mem_size, MemorySize, LoadType::kI32Load);
1543     // TODO(clemensh): Shift by immediate directly.
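    // current_memory returns a page count, so shift the byte size right by
    // log2(kWasmPageSize) == 16 (wasm pages are 64 KiB).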
1544     __ LoadConstant(tmp_const,
1545                     WasmValue(int32_t{WhichPowerOf2(kWasmPageSize)}));
1546     __ emit_i32_shr(mem_size.gp(), mem_size.gp(), tmp_const.gp(), pinned);
1547     __ PushRegister(kWasmI32, mem_size);
1548   }
1549 
1550   void GrowMemory(FullDecoder* decoder, const Value& value, Value* result_val) {
1551     // Pop the input, then spill all cache registers to make the runtime call.
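    // (The WasmGrowMemory stub presumably returns the old memory size in pages,
    // or -1 on failure; that value becomes the wasm-level result pushed below.)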
1552     LiftoffRegList pinned;
1553     LiftoffRegister input = pinned.set(__ PopToRegister());
1554     __ SpillAllRegisters();
1555 
1556     constexpr Register kGpReturnReg = kGpReturnRegisters[0];
1557     static_assert(kLiftoffAssemblerGpCacheRegs & Register::bit<kGpReturnReg>(),
1558                   "first return register is a cache register (needs more "
1559                   "complex code here otherwise)");
1560     LiftoffRegister result = pinned.set(LiftoffRegister(kGpReturnReg));
1561 
1562     WasmGrowMemoryDescriptor descriptor;
1563     DCHECK_EQ(0, descriptor.GetStackParameterCount());
1564     DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
1565     DCHECK_EQ(ValueTypes::MachineTypeFor(kWasmI32),
1566               descriptor.GetParameterType(0));
1567 
1568     Register param_reg = descriptor.GetRegisterParameter(0);
1569     if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
1570 
1571     __ CallRuntimeStub(WasmCode::kWasmGrowMemory);
1572     safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
1573                                              Safepoint::kNoLazyDeopt);
1574 
1575     if (kReturnRegister0 != result.gp()) {
1576       __ Move(result.gp(), kReturnRegister0, kWasmI32);
1577     }
1578 
1579     __ PushRegister(kWasmI32, result);
1580   }
1581 
1582   void CallDirect(FullDecoder* decoder,
1583                   const CallFunctionImmediate<validate>& imm,
1584                   const Value args[], Value returns[]) {
1585     if (imm.sig->return_count() > 1)
1586       return unsupported(decoder, "multi-return");
1587     if (imm.sig->return_count() == 1 &&
1588         !CheckSupportedType(decoder, kTypes_ilfd, imm.sig->GetReturn(0),
1589                             "return"))
1590       return;
1591 
1592     auto call_descriptor =
1593         compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
1594     call_descriptor =
1595         GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
1596 
1597     if (imm.index < env_->module->num_imported_functions) {
1598       // A direct call to an imported function.
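      // Load the code entry point and the owning instance from two parallel
      // per-instance arrays (ImportedFunctionTargets and
      // ImportedFunctionInstances), both indexed by the import index.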
1599       LiftoffRegList pinned;
1600       LiftoffRegister tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1601       LiftoffRegister target = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1602 
1603       LiftoffRegister imported_targets = tmp;
1604       LOAD_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets,
1605                           kPointerLoadType);
1606       __ Load(target, imported_targets.gp(), no_reg,
1607               imm.index * sizeof(Address), kPointerLoadType, pinned);
1608 
1609       LiftoffRegister imported_instances = tmp;
1610       LOAD_INSTANCE_FIELD(imported_instances, ImportedFunctionInstances,
1611                           kPointerLoadType);
1612       LiftoffRegister target_instance = tmp;
1613       __ Load(target_instance, imported_instances.gp(), no_reg,
1614               compiler::FixedArrayOffsetMinusTag(imm.index), kPointerLoadType,
1615               pinned);
1616 
1617       LiftoffRegister* explicit_instance = &target_instance;
1618       Register target_reg = target.gp();
1619       __ PrepareCall(imm.sig, call_descriptor, &target_reg, explicit_instance);
1620       source_position_table_builder_.AddPosition(
1621           __ pc_offset(), SourcePosition(decoder->position()), false);
1622 
1623       __ CallIndirect(imm.sig, call_descriptor, target_reg);
1624 
1625       safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
1626                                                Safepoint::kNoLazyDeopt);
1627 
1628       __ FinishCall(imm.sig, call_descriptor);
1629     } else {
1630       // A direct call within this module just gets the current instance.
1631       __ PrepareCall(imm.sig, call_descriptor);
1632 
1633       source_position_table_builder_.AddPosition(
1634           __ pc_offset(), SourcePosition(decoder->position()), false);
1635 
1636       // Just encode the function index. This will be patched at instantiation.
1637       Address addr = static_cast<Address>(imm.index);
1638       __ CallNativeWasmCode(addr);
1639 
1640       safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
1641                                                Safepoint::kNoLazyDeopt);
1642 
1643       __ FinishCall(imm.sig, call_descriptor);
1644     }
1645   }
1646 
1647   void CallIndirect(FullDecoder* decoder, const Value& index_val,
1648                     const CallIndirectImmediate<validate>& imm,
1649                     const Value args[], Value returns[]) {
1650     if (imm.sig->return_count() > 1) {
1651       return unsupported(decoder, "multi-return");
1652     }
1653     if (imm.sig->return_count() == 1 &&
1654         !CheckSupportedType(decoder, kTypes_ilfd, imm.sig->GetReturn(0),
1655                             "return")) {
1656       return;
1657     }
1658 
1659     // Pop the index.
1660     LiftoffRegister index = __ PopToRegister();
1661     // If the popped register is still used elsewhere, move the index into
1662     // another register, because we are about to modify it.
1663     if (__ cache_state()->is_used(index)) {
1664       LiftoffRegister new_index =
1665           __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(index));
1666       __ Move(new_index, index, kWasmI32);
1667       index = new_index;
1668     }
1669 
1670     LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
1671     // Get three temporary registers.
1672     LiftoffRegister table = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1673     LiftoffRegister tmp_const =
1674         pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1675     LiftoffRegister scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
1676 
1677     // Bounds check against the table size.
1678     Label* invalid_func_label = AddOutOfLineTrap(
1679         decoder->position(), WasmCode::kThrowWasmTrapFuncInvalid);
1680 
1681     uint32_t canonical_sig_num = env_->module->signature_ids[imm.sig_index];
1682     DCHECK_GE(canonical_sig_num, 0);
1683     DCHECK_GE(kMaxInt, canonical_sig_num);
1684 
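    // The indirect call goes through three parallel instance tables indexed by
    // {index}: signature ids, code targets, and instance objects. The sequence
    // below is: bounds check, optional index masking, signature check, then
    // load the target and instance and call.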
1685     // Compare against table size stored in
1686     // {instance->indirect_function_table_size}.
1687     LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize,
1688                         LoadType::kI32Load);
1689     __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
1690                       index.gp(), tmp_const.gp());
1691 
1692     // Mask the index to mitigate speculative side-channel attacks (SSCA).
1693     if (FLAG_untrusted_code_mitigations) {
1694       DEBUG_CODE_COMMENT("Mask indirect call index");
1695       // mask = ((index - size) & ~index) >> 31
1696       // Reuse allocated registers; note: size is still stored in {tmp_const}.
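      // Example (illustrative values): with size == 4 and index == 2, diff is
      // -2, so the sign bit survives the AND with ~index and the arithmetic
      // shift yields mask == ~0 (index kept). With index == 7, diff is 3, the
      // sign bit is 0, mask == 0, and the index is clamped to 0 even under
      // misspeculation.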
1697       LiftoffRegister diff = table;
1698       LiftoffRegister neg_index = tmp_const;
1699       LiftoffRegister mask = scratch;
1700       // 1) diff = index - size
1701       __ emit_i32_sub(diff.gp(), index.gp(), tmp_const.gp());
1702       // 2) neg_index = ~index
1703       __ LoadConstant(neg_index, WasmValue(int32_t{-1}));
1704       __ emit_i32_xor(neg_index.gp(), neg_index.gp(), index.gp());
1705       // 3) mask = diff & neg_index
1706       __ emit_i32_and(mask.gp(), diff.gp(), neg_index.gp());
1707       // 4) mask = mask >> 31
1708       __ LoadConstant(tmp_const, WasmValue(int32_t{31}));
1709       __ emit_i32_sar(mask.gp(), mask.gp(), tmp_const.gp(), pinned);
1710 
1711       // Apply mask.
1712       __ emit_i32_and(index.gp(), index.gp(), mask.gp());
1713     }
1714 
1715     DEBUG_CODE_COMMENT("Check indirect call signature");
1716     // Load the signature from {instance->ift_sig_ids[key]}
1717     LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kPointerLoadType);
1718     __ LoadConstant(tmp_const,
1719                     WasmValue(static_cast<uint32_t>(sizeof(uint32_t))));
1720     // TODO(wasm): use an emit_i32_shli() instead of a multiply.
1721     // (currently cannot use shl on ia32/x64 because it clobbers %rcx).
1722     __ emit_i32_mul(index.gp(), index.gp(), tmp_const.gp());
1723     __ Load(scratch, table.gp(), index.gp(), 0, LoadType::kI32Load, pinned);
1724 
1725     // Compare against expected signature.
1726     __ LoadConstant(tmp_const, WasmValue(canonical_sig_num));
1727 
1728     Label* sig_mismatch_label = AddOutOfLineTrap(
1729         decoder->position(), WasmCode::kThrowWasmTrapFuncSigMismatch);
1730     __ emit_cond_jump(kUnequal, sig_mismatch_label,
1731                       LiftoffAssembler::kWasmIntPtr, scratch.gp(),
1732                       tmp_const.gp());
1733 
1734     DEBUG_CODE_COMMENT("Execute indirect call");
1735     if (kPointerSize == 8) {
1736       // {index} is already scaled by 4; multiply by 2 more for 8-byte entries.
1737       __ LoadConstant(tmp_const, WasmValue(2));
1738       __ emit_i32_mul(index.gp(), index.gp(), tmp_const.gp());
1739     }
1740 
1741     // Load the target from {instance->ift_targets[key]}
1742     LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets, kPointerLoadType);
1743     __ Load(scratch, table.gp(), index.gp(), 0, kPointerLoadType, pinned);
1744 
1745     // Load the instance from {instance->ift_instances[key]}
1746     LOAD_INSTANCE_FIELD(table, IndirectFunctionTableInstances,
1747                         kPointerLoadType);
1748     __ Load(tmp_const, table.gp(), index.gp(),
1749             (FixedArray::kHeaderSize - kHeapObjectTag), kPointerLoadType,
1750             pinned);
1751     LiftoffRegister* explicit_instance = &tmp_const;
1752 
1753     source_position_table_builder_.AddPosition(
1754         __ pc_offset(), SourcePosition(decoder->position()), false);
1755 
1756     auto call_descriptor =
1757         compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
1758     call_descriptor =
1759         GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
1760 
1761     Register target = scratch.gp();
1762     __ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
1763     __ CallIndirect(imm.sig, call_descriptor, target);
1764 
1765     safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
1766                                              Safepoint::kNoLazyDeopt);
1767 
1768     __ FinishCall(imm.sig, call_descriptor);
1769   }
1770 
1771   void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
1772               Value* result) {
1773     unsupported(decoder, "simd");
1774   }
1775   void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
1776                   const SimdLaneImmediate<validate>& imm,
1777                   const Vector<Value> inputs, Value* result) {
1778     unsupported(decoder, "simd");
1779   }
1780   void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
1781                    const SimdShiftImmediate<validate>& imm, const Value& input,
1782                    Value* result) {
1783     unsupported(decoder, "simd");
1784   }
1785   void Simd8x16ShuffleOp(FullDecoder* decoder,
1786                          const Simd8x16ShuffleImmediate<validate>& imm,
1787                          const Value& input0, const Value& input1,
1788                          Value* result) {
1789     unsupported(decoder, "simd");
1790   }
1791   void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
1792              Control* block, const Vector<Value>& args) {
1793     unsupported(decoder, "throw");
1794   }
1795   void CatchException(FullDecoder* decoder,
1796                       const ExceptionIndexImmediate<validate>& imm,
1797                       Control* block, Vector<Value> caught_values) {
1798     unsupported(decoder, "catch");
1799   }
1800   void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
1801                 const MemoryAccessImmediate<validate>& imm, Value* result) {
1802     unsupported(decoder, "atomicop");
1803   }
1804 
1805  private:
1806   LiftoffAssembler asm_;
1807   compiler::CallDescriptor* const descriptor_;
1808   ModuleEnv* const env_;
1809   bool ok_ = true;
1810   std::vector<OutOfLineCode> out_of_line_code_;
1811   SourcePositionTableBuilder source_position_table_builder_;
1812   std::vector<trap_handler::ProtectedInstructionData> protected_instructions_;
1813   // Zone used to store information during compilation. The result will be
1814   // stored independently, such that this zone can die together with the
1815   // LiftoffCompiler after compilation.
1816   Zone* compilation_zone_;
1817   SafepointTableBuilder safepoint_table_builder_;
1818   // The pc offset of the instructions to reserve the stack frame. Needed to
1819   // patch the actually required stack size at the end of compilation.
1820   uint32_t pc_offset_stack_frame_construction_ = 0;
1821 
1822   void TraceCacheState(FullDecoder* decoder) const {
1823 #ifdef DEBUG
1824     if (!FLAG_trace_liftoff || !FLAG_trace_wasm_decoder) return;
1825     StdoutStream os;
1826     for (int control_depth = decoder->control_depth() - 1; control_depth >= -1;
1827          --control_depth) {
1828       auto* cache_state =
1829           control_depth == -1 ? __ cache_state()
1830                               : &decoder->control_at(control_depth)
1831                                      ->label_state;
1832       os << PrintCollection(cache_state->stack_state);
1833       if (control_depth != -1) PrintF("; ");
1834     }
1835     os << "\n";
1836 #endif
1837   }
1838 };
1839 
1840 }  // namespace
1841 
1842 bool LiftoffCompilationUnit::ExecuteCompilation(WasmFeatures* detected) {
1843   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
1844                "ExecuteLiftoffCompilation");
1845   base::ElapsedTimer compile_timer;
1846   if (FLAG_trace_wasm_decode_time) {
1847     compile_timer.Start();
1848   }
1849 
1850   Zone zone(wasm_unit_->wasm_engine_->allocator(), "LiftoffCompilationZone");
1851   const WasmModule* module =
1852       wasm_unit_->env_ ? wasm_unit_->env_->module : nullptr;
1853   auto call_descriptor =
1854       compiler::GetWasmCallDescriptor(&zone, wasm_unit_->func_body_.sig);
1855   base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
1856       base::in_place, wasm_unit_->counters_->liftoff_compile_time());
1857   WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
1858       &zone, module, wasm_unit_->native_module_->enabled_features(), detected,
1859       wasm_unit_->func_body_, call_descriptor, wasm_unit_->env_, &zone);
1860   decoder.Decode();
1861   liftoff_compile_time_scope.reset();
1862   LiftoffCompiler* compiler = &decoder.interface();
1863   if (decoder.failed()) return false;  // validation error
1864   if (!compiler->ok()) {
1865     // Liftoff compilation failed.
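    // (Typically an unsupported opcode or type; the caller presumably falls
    // back to TurboFan for this function.)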
1866     wasm_unit_->counters_->liftoff_unsupported_functions()->Increment();
1867     return false;
1868   }
1869 
1870   wasm_unit_->counters_->liftoff_compiled_functions()->Increment();
1871 
1872   if (FLAG_trace_wasm_decode_time) {
1873     double compile_ms = compile_timer.Elapsed().InMillisecondsF();
1874     PrintF(
1875         "wasm-compilation liftoff phase 1 ok: %u bytes, %0.3f ms decode and "
1876         "compile\n",
1877         static_cast<unsigned>(wasm_unit_->func_body_.end -
1878                               wasm_unit_->func_body_.start),
1879         compile_ms);
1880   }
1881 
1882   CodeDesc desc;
1883   compiler->GetCode(&desc);
1884   OwnedVector<byte> source_positions = compiler->GetSourcePositionTable();
1885   OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions =
1886       compiler->GetProtectedInstructions();
1887   uint32_t frame_slot_count = compiler->GetTotalFrameSlotCount();
1888   int safepoint_table_offset = compiler->GetSafepointTableOffset();
1889 
1890   code_ = wasm_unit_->native_module_->AddCode(
1891       wasm_unit_->func_index_, desc, frame_slot_count, safepoint_table_offset,
1892       0, std::move(protected_instructions), std::move(source_positions),
1893       WasmCode::kLiftoff);
1894   wasm_unit_->native_module_->PublishCode(code_);
1895 
1896   return true;
1897 }
1898 
1899 WasmCode* LiftoffCompilationUnit::FinishCompilation(ErrorThrower*) {
1900   return code_;
1901 }
1902 
1903 #undef __
1904 #undef TRACE
1905 #undef WASM_INSTANCE_OBJECT_OFFSET
1906 #undef LOAD_INSTANCE_FIELD
1907 #undef DEBUG_CODE_COMMENT
1908 
1909 }  // namespace wasm
1910 }  // namespace internal
1911 }  // namespace v8
1912