• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <atomic>
6 #include <type_traits>
7 
8 #include "src/wasm/wasm-interpreter.h"
9 
10 #include "src/assembler-inl.h"
11 #include "src/boxed-float.h"
12 #include "src/compiler/wasm-compiler.h"
13 #include "src/conversions.h"
14 #include "src/identity-map.h"
15 #include "src/objects-inl.h"
16 #include "src/trap-handler/trap-handler.h"
17 #include "src/utils.h"
18 #include "src/wasm/decoder.h"
19 #include "src/wasm/function-body-decoder-impl.h"
20 #include "src/wasm/function-body-decoder.h"
21 #include "src/wasm/memory-tracing.h"
22 #include "src/wasm/wasm-engine.h"
23 #include "src/wasm/wasm-external-refs.h"
24 #include "src/wasm/wasm-limits.h"
25 #include "src/wasm/wasm-module.h"
26 #include "src/wasm/wasm-objects-inl.h"
27 
28 #include "src/zone/accounting-allocator.h"
29 #include "src/zone/zone-containers.h"
30 
31 namespace v8 {
32 namespace internal {
33 namespace wasm {
34 
// Prints interpreter tracing output via PrintF when the
// --trace-wasm-interpreter flag is enabled; a no-op otherwise.
#define TRACE(...)                                        \
  do {                                                    \
    if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
  } while (false)
39 
// Maps a SIMD lane index to its position in the backing array: big-endian
// targets store lanes in reverse order, so the index is mirrored there.
#if V8_TARGET_BIG_ENDIAN
#define LANE(i, type) ((sizeof(type.val) / sizeof(type.val[0])) - (i)-1)
#else
#define LANE(i, type) (i)
#endif
45 
// Opcodes used only inside the interpreter (not part of the wasm binary);
// pairs of (name, byte value).
#define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF)

// Maps each wasm value-type tag to the C++ type used to represent it.
#define WASM_CTYPES(V) \
  V(I32, int32_t) V(I64, int64_t) V(F32, float) V(F64, double) V(S128, Simd128)
50 
// Binary operators that map directly onto a C++ operator; triples of
// (wasm opcode, operand C++ type, C++ operator) for macro expansion.
#define FOREACH_SIMPLE_BINOP(V) \
  V(I32Add, uint32_t, +)        \
  V(I32Sub, uint32_t, -)        \
  V(I32Mul, uint32_t, *)        \
  V(I32And, uint32_t, &)        \
  V(I32Ior, uint32_t, |)        \
  V(I32Xor, uint32_t, ^)        \
  V(I32Eq, uint32_t, ==)        \
  V(I32Ne, uint32_t, !=)        \
  V(I32LtU, uint32_t, <)        \
  V(I32LeU, uint32_t, <=)       \
  V(I32GtU, uint32_t, >)        \
  V(I32GeU, uint32_t, >=)       \
  V(I32LtS, int32_t, <)         \
  V(I32LeS, int32_t, <=)        \
  V(I32GtS, int32_t, >)         \
  V(I32GeS, int32_t, >=)        \
  V(I64Add, uint64_t, +)        \
  V(I64Sub, uint64_t, -)        \
  V(I64Mul, uint64_t, *)        \
  V(I64And, uint64_t, &)        \
  V(I64Ior, uint64_t, |)        \
  V(I64Xor, uint64_t, ^)        \
  V(I64Eq, uint64_t, ==)        \
  V(I64Ne, uint64_t, !=)        \
  V(I64LtU, uint64_t, <)        \
  V(I64LeU, uint64_t, <=)       \
  V(I64GtU, uint64_t, >)        \
  V(I64GeU, uint64_t, >=)       \
  V(I64LtS, int64_t, <)         \
  V(I64LeS, int64_t, <=)        \
  V(I64GtS, int64_t, >)         \
  V(I64GeS, int64_t, >=)        \
  V(F32Add, float, +)           \
  V(F32Sub, float, -)           \
  V(F32Eq, float, ==)           \
  V(F32Ne, float, !=)           \
  V(F32Lt, float, <)            \
  V(F32Le, float, <=)           \
  V(F32Gt, float, >)            \
  V(F32Ge, float, >=)           \
  V(F64Add, double, +)          \
  V(F64Sub, double, -)          \
  V(F64Eq, double, ==)          \
  V(F64Ne, double, !=)          \
  V(F64Lt, double, <)           \
  V(F64Le, double, <=)          \
  V(F64Gt, double, >)           \
  V(F64Ge, double, >=)          \
  V(F32Mul, float, *)           \
  V(F64Mul, double, *)          \
  V(F32Div, float, /)           \
  V(F64Div, double, /)
104 
// Binary operators that need a dedicated Execute<Name> helper (trapping,
// shift-count masking, NaN handling, ...); pairs of (opcode, operand type).
#define FOREACH_OTHER_BINOP(V) \
  V(I32DivS, int32_t)          \
  V(I32DivU, uint32_t)         \
  V(I32RemS, int32_t)          \
  V(I32RemU, uint32_t)         \
  V(I32Shl, uint32_t)          \
  V(I32ShrU, uint32_t)         \
  V(I32ShrS, int32_t)          \
  V(I64DivS, int64_t)          \
  V(I64DivU, uint64_t)         \
  V(I64RemS, int64_t)          \
  V(I64RemU, uint64_t)         \
  V(I64Shl, uint64_t)          \
  V(I64ShrU, uint64_t)         \
  V(I64ShrS, int64_t)          \
  V(I32Ror, int32_t)           \
  V(I32Rol, int32_t)           \
  V(I64Ror, int64_t)           \
  V(I64Rol, int64_t)           \
  V(F32Min, float)             \
  V(F32Max, float)             \
  V(F64Min, double)            \
  V(F64Max, double)            \
  V(I32AsmjsDivS, int32_t)     \
  V(I32AsmjsDivU, uint32_t)    \
  V(I32AsmjsRemS, int32_t)     \
  V(I32AsmjsRemU, uint32_t)    \
  V(F32CopySign, Float32)      \
  V(F64CopySign, Float64)
134 
// Float-to-int32 truncations that share the generic ExecuteConvert helper;
// triples of (opcode, destination integer type, source float type).
#define FOREACH_I32CONV_FLOATOP(V)   \
  V(I32SConvertF32, int32_t, float)  \
  V(I32SConvertF64, int32_t, double) \
  V(I32UConvertF32, uint32_t, float) \
  V(I32UConvertF64, uint32_t, double)
140 
// Unary operators implemented by a dedicated Execute<Name> helper; pairs of
// (opcode, input C++ type).
#define FOREACH_OTHER_UNOP(V)    \
  V(I32Clz, uint32_t)            \
  V(I32Ctz, uint32_t)            \
  V(I32Popcnt, uint32_t)         \
  V(I32Eqz, uint32_t)            \
  V(I64Clz, uint64_t)            \
  V(I64Ctz, uint64_t)            \
  V(I64Popcnt, uint64_t)         \
  V(I64Eqz, uint64_t)            \
  V(F32Abs, Float32)             \
  V(F32Neg, Float32)             \
  V(F32Ceil, float)              \
  V(F32Floor, float)             \
  V(F32Trunc, float)             \
  V(F32NearestInt, float)        \
  V(F64Abs, Float64)             \
  V(F64Neg, Float64)             \
  V(F64Ceil, double)             \
  V(F64Floor, double)            \
  V(F64Trunc, double)            \
  V(F64NearestInt, double)       \
  V(I32ConvertI64, int64_t)      \
  V(I64SConvertF32, float)       \
  V(I64SConvertF64, double)      \
  V(I64UConvertF32, float)       \
  V(I64UConvertF64, double)      \
  V(I64SConvertI32, int32_t)     \
  V(I64UConvertI32, uint32_t)    \
  V(F32SConvertI32, int32_t)     \
  V(F32UConvertI32, uint32_t)    \
  V(F32SConvertI64, int64_t)     \
  V(F32UConvertI64, uint64_t)    \
  V(F32ConvertF64, double)       \
  V(F32ReinterpretI32, int32_t)  \
  V(F64SConvertI32, int32_t)     \
  V(F64UConvertI32, uint32_t)    \
  V(F64SConvertI64, int64_t)     \
  V(F64UConvertI64, uint64_t)    \
  V(F64ConvertF32, float)        \
  V(F64ReinterpretI64, int64_t)  \
  V(I32AsmjsSConvertF32, float)  \
  V(I32AsmjsUConvertF32, float)  \
  V(I32AsmjsSConvertF64, double) \
  V(I32AsmjsUConvertF64, double) \
  V(F32Sqrt, float)              \
  V(F64Sqrt, double)
187 
188 namespace {
189 
// IEEE-754 sign-bit masks for 32-bit and 64-bit floats, used by the
// abs/neg/copysign helpers below to manipulate the sign bit directly.
constexpr uint32_t kFloat32SignBitMask = uint32_t{1} << 31;
constexpr uint64_t kFloat64SignBitMask = uint64_t{1} << 63;
192 
ExecuteI32DivS(int32_t a,int32_t b,TrapReason * trap)193 inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
194   if (b == 0) {
195     *trap = kTrapDivByZero;
196     return 0;
197   }
198   if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
199     *trap = kTrapDivUnrepresentable;
200     return 0;
201   }
202   return a / b;
203 }
204 
ExecuteI32DivU(uint32_t a,uint32_t b,TrapReason * trap)205 inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b, TrapReason* trap) {
206   if (b == 0) {
207     *trap = kTrapDivByZero;
208     return 0;
209   }
210   return a / b;
211 }
212 
ExecuteI32RemS(int32_t a,int32_t b,TrapReason * trap)213 inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
214   if (b == 0) {
215     *trap = kTrapRemByZero;
216     return 0;
217   }
218   if (b == -1) return 0;
219   return a % b;
220 }
221 
ExecuteI32RemU(uint32_t a,uint32_t b,TrapReason * trap)222 inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b, TrapReason* trap) {
223   if (b == 0) {
224     *trap = kTrapRemByZero;
225     return 0;
226   }
227   return a % b;
228 }
229 
// 32-bit shifts use only the low 5 bits of the shift count (wasm semantics);
// this also keeps the C++ shift amount in range. |trap| is unused.
inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
  return a << (b & 0x1F);
}

// Logical (zero-extending) right shift.
inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b, TrapReason* trap) {
  return a >> (b & 0x1F);
}

// Arithmetic (sign-extending) right shift.
inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
  return a >> (b & 0x1F);
}
241 
// Signed 64-bit wasm division; traps on a zero divisor and on the
// overflowing case INT64_MIN / -1, returning 0 in both trap cases.
inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  if (b == -1 && a == std::numeric_limits<int64_t>::min()) {
    *trap = kTrapDivUnrepresentable;
    return 0;
  }
  return a / b;
}

// Unsigned 64-bit wasm division; traps (and returns 0) on a zero divisor.
inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  return a / b;
}

// Signed 64-bit wasm remainder; traps on a zero divisor. Divisor -1 is
// special-cased: INT64_MIN % -1 overflows in C++ but is 0 in wasm.
inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  if (b == -1) return 0;
  return a % b;
}

// Unsigned 64-bit wasm remainder; traps (and returns 0) on a zero divisor.
inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  return a % b;
}

// 64-bit shifts use only the low 6 bits of the shift count (wasm semantics);
// this also keeps the C++ shift amount in range. |trap| is unused.
inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
  return a << (b & 0x3F);
}

// Logical (zero-extending) right shift.
inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b, TrapReason* trap) {
  return a >> (b & 0x3F);
}

// Arithmetic (sign-extending) right shift.
inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
  return a >> (b & 0x3F);
}
290 
// Rotates |a| right by |b| bits; the count is taken modulo 32. |trap| is
// unused. The complementary shift amount is masked as well: without it,
// b % 32 == 0 produced `a << 32`, which is undefined behavior for a 32-bit
// operand ([expr.shift]). With the mask, shift == 0 yields (a >> 0)|(a << 0)
// == a, the correct rotation by zero.
inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x1F);
  return (a >> shift) | (a << ((32 - shift) & 0x1F));
}
295 
// Rotates |a| left by |b| bits; the count is taken modulo 32. |trap| is
// unused. Masking the complementary shift avoids the undefined `a >> 32`
// that the unmasked form executed when b % 32 == 0.
inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x1F);
  return (a << shift) | (a >> ((32 - shift) & 0x1F));
}
300 
// Rotates |a| right by |b| bits; the count is taken modulo 64. |trap| is
// unused. Masking the complementary shift avoids the undefined `a << 64`
// that the unmasked form executed when b % 64 == 0.
inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x3F);
  return (a >> shift) | (a << ((64 - shift) & 0x3F));
}
305 
// Rotates |a| left by |b| bits; the count is taken modulo 64. |trap| is
// unused. Masking the complementary shift avoids the undefined `a >> 64`
// that the unmasked form executed when b % 64 == 0.
inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x3F);
  return (a << shift) | (a >> ((64 - shift) & 0x3F));
}
310 
// f32.min, delegated to the shared JSMin helper (NaN/-0 handling lives
// there). |trap| is unused.
inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
  return JSMin(a, b);
}

// f32.max, delegated to the shared JSMax helper.
inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
  return JSMax(a, b);
}

// f32.copysign: |a|'s magnitude with |b|'s sign bit, computed purely on the
// bit representation so NaN payloads are preserved.
inline Float32 ExecuteF32CopySign(Float32 a, Float32 b, TrapReason* trap) {
  return Float32::FromBits((a.get_bits() & ~kFloat32SignBitMask) |
                           (b.get_bits() & kFloat32SignBitMask));
}

// f64.min, delegated to the shared JSMin helper.
inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
  return JSMin(a, b);
}

// f64.max, delegated to the shared JSMax helper.
inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
  return JSMax(a, b);
}

// f64.copysign, bit-level like the f32 variant above.
inline Float64 ExecuteF64CopySign(Float64 a, Float64 b, TrapReason* trap) {
  return Float64::FromBits((a.get_bits() & ~kFloat64SignBitMask) |
                           (b.get_bits() & kFloat64SignBitMask));
}
336 
// asm.js signed division: never traps. Division by zero yields 0 and the
// overflowing INT32_MIN / -1 yields INT32_MIN (JS-style wrapping).
// |trap| is unused in all four asm.js helpers below.
inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b, TrapReason* trap) {
  if (b == 0) return 0;
  if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
    return std::numeric_limits<int32_t>::min();
  }
  return a / b;
}

// asm.js unsigned division: division by zero yields 0 instead of trapping.
inline uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b, TrapReason* trap) {
  if (b == 0) return 0;
  return a / b;
}

// asm.js signed remainder: zero divisor yields 0; divisor -1 yields 0
// (avoids the INT32_MIN % -1 overflow in C++).
inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b, TrapReason* trap) {
  if (b == 0) return 0;
  if (b == -1) return 0;
  return a % b;
}

// asm.js unsigned remainder: zero divisor yields 0 instead of trapping.
inline uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b, TrapReason* trap) {
  if (b == 0) return 0;
  return a % b;
}
360 
// asm.js float->int conversions never trap: they go through V8's
// DoubleToInt32/DoubleToUint32 helpers (presumably JS ToInt32/ToUint32
// semantics, i.e. NaN/infinity become 0 and out-of-range values wrap —
// semantics live in those helpers). |trap| is unused throughout.
inline int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) {
  return DoubleToInt32(a);
}

inline uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) {
  return DoubleToUint32(a);
}

inline int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) {
  return DoubleToInt32(a);
}

inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
  return DoubleToUint32(a);
}
376 
// Bit-counting and zero-test helpers; |trap| is unused in all of them.

// i32.clz: number of leading zero bits (32 when val == 0).
int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
  return base::bits::CountLeadingZeros(val);
}

// i32.ctz: number of trailing zero bits.
uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
  return base::bits::CountTrailingZeros(val);
}

// i32.popcnt: number of set bits.
uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
  return base::bits::CountPopulation(val);
}

// i32.eqz: 1 if val is zero, else 0.
inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
  return val == 0 ? 1 : 0;
}

// i64.clz.
int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
  return base::bits::CountLeadingZeros(val);
}

// i64.ctz.
inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
  return base::bits::CountTrailingZeros(val);
}

// i64.popcnt.
inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
  return base::bits::CountPopulation(val);
}

// i64.eqz: note the wasm result type is i32.
inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
  return val == 0 ? 1 : 0;
}
408 
// Floating-point unary operators; |trap| is unused in all of them.
// abs/neg are done on the raw bit pattern so NaN payloads are preserved.

// f32.abs: clear the sign bit.
inline Float32 ExecuteF32Abs(Float32 a, TrapReason* trap) {
  return Float32::FromBits(a.get_bits() & ~kFloat32SignBitMask);
}

// f32.neg: flip the sign bit.
inline Float32 ExecuteF32Neg(Float32 a, TrapReason* trap) {
  return Float32::FromBits(a.get_bits() ^ kFloat32SignBitMask);
}

inline float ExecuteF32Ceil(float a, TrapReason* trap) { return ceilf(a); }

inline float ExecuteF32Floor(float a, TrapReason* trap) { return floorf(a); }

inline float ExecuteF32Trunc(float a, TrapReason* trap) { return truncf(a); }

// f32.nearest: round to nearest integer, ties to even (nearbyint in the
// default rounding mode).
inline float ExecuteF32NearestInt(float a, TrapReason* trap) {
  return nearbyintf(a);
}

inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
  float result = sqrtf(a);
  return result;
}

// f64.abs: clear the sign bit.
inline Float64 ExecuteF64Abs(Float64 a, TrapReason* trap) {
  return Float64::FromBits(a.get_bits() & ~kFloat64SignBitMask);
}

// f64.neg: flip the sign bit.
inline Float64 ExecuteF64Neg(Float64 a, TrapReason* trap) {
  return Float64::FromBits(a.get_bits() ^ kFloat64SignBitMask);
}

inline double ExecuteF64Ceil(double a, TrapReason* trap) { return ceil(a); }

inline double ExecuteF64Floor(double a, TrapReason* trap) { return floor(a); }

inline double ExecuteF64Trunc(double a, TrapReason* trap) { return trunc(a); }

// f64.nearest: round to nearest integer, ties to even.
inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
  return nearbyint(a);
}

inline double ExecuteF64Sqrt(double a, TrapReason* trap) { return sqrt(a); }
451 
// Converts a float value to the integral type |int_type|, trapping with
// kTrapFloatUnrepresentable (and returning 0) when the truncated value does
// not fit in the destination type.
template <typename int_type, typename float_type>
int_type ExecuteConvert(float_type a, TrapReason* trap) {
  if (is_inbounds<int_type>(a)) {
    return static_cast<int_type>(a);
  }
  *trap = kTrapFloatUnrepresentable;
  return 0;
}
460 
461 template <typename int_type, typename float_type>
ExecuteConvertSaturate(float_type a)462 int_type ExecuteConvertSaturate(float_type a) {
463   TrapReason base_trap = kTrapCount;
464   int32_t val = ExecuteConvert<int_type>(a, &base_trap);
465   if (base_trap == kTrapCount) {
466     return val;
467   }
468   return std::isnan(a) ? 0
469                        : (a < static_cast<float_type>(0.0)
470                               ? std::numeric_limits<int_type>::min()
471                               : std::numeric_limits<int_type>::max());
472 }
473 
// Calls an external int-to-float conversion stub through a scratch buffer:
// the input is written unaligned into the buffer, |fn| converts it in place,
// and the result is read back. The buffer is sized for the larger of the
// source and destination types.
template <typename dst_type, typename src_type, void (*fn)(Address)>
inline dst_type CallExternalIntToFloatFunction(src_type input) {
  uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
  Address data_addr = reinterpret_cast<Address>(data);
  WriteUnalignedValue<src_type>(data_addr, input);
  fn(data_addr);
  return ReadUnalignedValue<dst_type>(data_addr);
}
482 
// Calls an external float-to-int conversion stub through a scratch buffer.
// |fn| returns zero when the value is unrepresentable, in which case the
// trap reason is set; the (then meaningless) buffer contents are still
// returned, matching the trapping contract of the callers.
template <typename dst_type, typename src_type, int32_t (*fn)(Address)>
inline dst_type CallExternalFloatToIntFunction(src_type input,
                                               TrapReason* trap) {
  uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
  Address data_addr = reinterpret_cast<Address>(data);
  WriteUnalignedValue<src_type>(data_addr, input);
  if (!fn(data_addr)) *trap = kTrapFloatUnrepresentable;
  return ReadUnalignedValue<dst_type>(data_addr);
}
492 
// i32.wrap_i64: keep the low 32 bits. |trap| is unused.
inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
  return static_cast<uint32_t>(a & 0xFFFFFFFF);
}

// i64.trunc_f32_s via the external wrapper; traps if unrepresentable.
int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
  return CallExternalFloatToIntFunction<int64_t, float,
                                        float32_to_int64_wrapper>(a, trap);
}

// Saturating variant: NaN -> 0, out-of-range clamps to INT64_MIN/INT64_MAX.
int64_t ExecuteI64SConvertSatF32(float a) {
  TrapReason base_trap = kTrapCount;
  int64_t val = ExecuteI64SConvertF32(a, &base_trap);
  if (base_trap == kTrapCount) {
    return val;
  }
  return std::isnan(a) ? 0
                       : (a < 0.0 ? std::numeric_limits<int64_t>::min()
                                  : std::numeric_limits<int64_t>::max());
}

// i64.trunc_f64_s via the external wrapper; traps if unrepresentable.
int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
  return CallExternalFloatToIntFunction<int64_t, double,
                                        float64_to_int64_wrapper>(a, trap);
}

// Saturating variant: NaN -> 0, out-of-range clamps to INT64_MIN/INT64_MAX.
int64_t ExecuteI64SConvertSatF64(double a) {
  TrapReason base_trap = kTrapCount;
  int64_t val = ExecuteI64SConvertF64(a, &base_trap);
  if (base_trap == kTrapCount) {
    return val;
  }
  return std::isnan(a) ? 0
                       : (a < 0.0 ? std::numeric_limits<int64_t>::min()
                                  : std::numeric_limits<int64_t>::max());
}

// i64.trunc_f32_u via the external wrapper; traps if unrepresentable.
uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
  return CallExternalFloatToIntFunction<uint64_t, float,
                                        float32_to_uint64_wrapper>(a, trap);
}

// Saturating variant: NaN -> 0, negative -> 0 (uint64 min), too large ->
// UINT64_MAX.
uint64_t ExecuteI64UConvertSatF32(float a) {
  TrapReason base_trap = kTrapCount;
  uint64_t val = ExecuteI64UConvertF32(a, &base_trap);
  if (base_trap == kTrapCount) {
    return val;
  }
  return std::isnan(a) ? 0
                       : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
                                  : std::numeric_limits<uint64_t>::max());
}

// i64.trunc_f64_u via the external wrapper; traps if unrepresentable.
uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
  return CallExternalFloatToIntFunction<uint64_t, double,
                                        float64_to_uint64_wrapper>(a, trap);
}
549 
ExecuteI64UConvertSatF64(double a)550 uint64_t ExecuteI64UConvertSatF64(double a) {
551   TrapReason base_trap = kTrapCount;
552   int64_t val = ExecuteI64UConvertF64(a, &base_trap);
553   if (base_trap == kTrapCount) {
554     return val;
555   }
556   return std::isnan(a) ? 0
557                        : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
558                                   : std::numeric_limits<uint64_t>::max());
559 }
560 
// Simple scalar conversions and reinterpretations. None of these can trap;
// |trap| is unused throughout.

// i64.extend_i32_s: sign extension.
inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<int64_t>(a);
}

// i64.extend_i32_u: zero extension.
inline int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<uint64_t>(a);
}

inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

// u64 -> f32 goes through an external stub (platforms lack a direct
// instruction for this conversion).
inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
  return CallExternalIntToFloatFunction<float, uint64_t,
                                        uint64_to_float32_wrapper>(a);
}

inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
  return static_cast<float>(a);
}

// f32.reinterpret_i32: bit-identical reinterpretation.
inline Float32 ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
  return Float32::FromBits(a);
}

inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

// u64 -> f64 also goes through an external stub.
inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
  return CallExternalIntToFloatFunction<double, uint64_t,
                                        uint64_to_float64_wrapper>(a);
}

inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
  return static_cast<double>(a);
}

// f64.reinterpret_i64: bit-identical reinterpretation.
inline Float64 ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
  return Float64::FromBits(a);
}

// i32.reinterpret_f32: the boxed representation preserves NaN bits.
inline int32_t ExecuteI32ReinterpretF32(WasmValue a) {
  return a.to_f32_boxed().get_bits();
}

// i64.reinterpret_f64.
inline int64_t ExecuteI64ReinterpretF64(WasmValue a) {
  return a.to_f64_boxed().get_bits();
}
626 
// Enum of interpreter-internal opcodes, generated from
// FOREACH_INTERNAL_OPCODE (currently only kInternalBreakpoint = 0xFF).
enum InternalOpcode {
#define DECL_INTERNAL_ENUM(name, value) kInternal##name = value,
  FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_ENUM)
#undef DECL_INTERNAL_ENUM
};
632 
// Returns a printable name for |val|: interpreter-internal opcodes get an
// "Internal" prefix; everything else falls through to the regular wasm
// opcode name.
const char* OpcodeName(uint32_t val) {
  switch (val) {
#define DECL_INTERNAL_CASE(name, value) \
  case kInternal##name:                 \
    return "Internal" #name;
    FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_CASE)
#undef DECL_INTERNAL_CASE
  }
  return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
}
643 
644 class SideTable;
645 
646 // Code and metadata needed to execute a function.
// Code and metadata needed to execute a function.
struct InterpreterCode {
  const WasmFunction* function;  // wasm function
  BodyLocalDecls locals;         // local declarations
  const byte* orig_start;        // start of original code
  const byte* orig_end;          // end of original code
  byte* start;                   // start of (maybe altered) code
  byte* end;                     // end of (maybe altered) code
  SideTable* side_table;         // precomputed side table for control flow.

  // Returns a pointer into the (possibly patched) code at offset |pc|.
  const byte* at(pc_t pc) { return start + pc; }
};
658 
659 // A helper class to compute the control transfers for each bytecode offset.
660 // Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to
661 // be directly executed without the need to dynamically track blocks.
662 class SideTable : public ZoneObject {
663  public:
664   ControlTransferMap map_;
665   uint32_t max_stack_height_ = 0;
666 
SideTable(Zone * zone,const WasmModule * module,InterpreterCode * code)667   SideTable(Zone* zone, const WasmModule* module, InterpreterCode* code)
668       : map_(zone) {
669     // Create a zone for all temporary objects.
670     Zone control_transfer_zone(zone->allocator(), ZONE_NAME);
671 
672     // Represents a control flow label.
673     class CLabel : public ZoneObject {
674       explicit CLabel(Zone* zone, uint32_t target_stack_height, uint32_t arity)
675           : target_stack_height(target_stack_height),
676             arity(arity),
677             refs(zone) {}
678 
679      public:
680       struct Ref {
681         const byte* from_pc;
682         const uint32_t stack_height;
683       };
684       const byte* target = nullptr;
685       uint32_t target_stack_height;
686       // Arity when branching to this label.
687       const uint32_t arity;
688       ZoneVector<Ref> refs;
689 
690       static CLabel* New(Zone* zone, uint32_t stack_height, uint32_t arity) {
691         return new (zone) CLabel(zone, stack_height, arity);
692       }
693 
694       // Bind this label to the given PC.
695       void Bind(const byte* pc) {
696         DCHECK_NULL(target);
697         target = pc;
698       }
699 
700       // Reference this label from the given location.
701       void Ref(const byte* from_pc, uint32_t stack_height) {
702         // Target being bound before a reference means this is a loop.
703         DCHECK_IMPLIES(target, *target == kExprLoop);
704         refs.push_back({from_pc, stack_height});
705       }
706 
707       void Finish(ControlTransferMap* map, const byte* start) {
708         DCHECK_NOT_NULL(target);
709         for (auto ref : refs) {
710           size_t offset = static_cast<size_t>(ref.from_pc - start);
711           auto pcdiff = static_cast<pcdiff_t>(target - ref.from_pc);
712           DCHECK_GE(ref.stack_height, target_stack_height);
713           spdiff_t spdiff =
714               static_cast<spdiff_t>(ref.stack_height - target_stack_height);
715           TRACE("control transfer @%zu: Δpc %d, stack %u->%u = -%u\n", offset,
716                 pcdiff, ref.stack_height, target_stack_height, spdiff);
717           ControlTransferEntry& entry = (*map)[offset];
718           entry.pc_diff = pcdiff;
719           entry.sp_diff = spdiff;
720           entry.target_arity = arity;
721         }
722       }
723     };
724 
725     // An entry in the control stack.
726     struct Control {
727       const byte* pc;
728       CLabel* end_label;
729       CLabel* else_label;
730       // Arity (number of values on the stack) when exiting this control
731       // structure via |end|.
732       uint32_t exit_arity;
733       // Track whether this block was already left, i.e. all further
734       // instructions are unreachable.
735       bool unreachable = false;
736 
737       Control(const byte* pc, CLabel* end_label, CLabel* else_label,
738               uint32_t exit_arity)
739           : pc(pc),
740             end_label(end_label),
741             else_label(else_label),
742             exit_arity(exit_arity) {}
743       Control(const byte* pc, CLabel* end_label, uint32_t exit_arity)
744           : Control(pc, end_label, nullptr, exit_arity) {}
745 
746       void Finish(ControlTransferMap* map, const byte* start) {
747         end_label->Finish(map, start);
748         if (else_label) else_label->Finish(map, start);
749       }
750     };
751 
752     // Compute the ControlTransfer map.
753     // This algorithm maintains a stack of control constructs similar to the
754     // AST decoder. The {control_stack} allows matching {br,br_if,br_table}
755     // bytecodes with their target, as well as determining whether the current
756     // bytecodes are within the true or false block of an else.
757     ZoneVector<Control> control_stack(&control_transfer_zone);
758     uint32_t stack_height = 0;
759     uint32_t func_arity =
760         static_cast<uint32_t>(code->function->sig->return_count());
761     CLabel* func_label =
762         CLabel::New(&control_transfer_zone, stack_height, func_arity);
763     control_stack.emplace_back(code->orig_start, func_label, func_arity);
764     auto control_parent = [&]() -> Control& {
765       DCHECK_LE(2, control_stack.size());
766       return control_stack[control_stack.size() - 2];
767     };
768     auto copy_unreachable = [&] {
769       control_stack.back().unreachable = control_parent().unreachable;
770     };
771     for (BytecodeIterator i(code->orig_start, code->orig_end, &code->locals);
772          i.has_next(); i.next()) {
773       WasmOpcode opcode = i.current();
774       if (WasmOpcodes::IsPrefixOpcode(opcode)) opcode = i.prefixed_opcode();
775       bool unreachable = control_stack.back().unreachable;
776       if (unreachable) {
777         TRACE("@%u: %s (is unreachable)\n", i.pc_offset(),
778               WasmOpcodes::OpcodeName(opcode));
779       } else {
780         auto stack_effect =
781             StackEffect(module, code->function->sig, i.pc(), i.end());
782         TRACE("@%u: %s (sp %d - %d + %d)\n", i.pc_offset(),
783               WasmOpcodes::OpcodeName(opcode), stack_height, stack_effect.first,
784               stack_effect.second);
785         DCHECK_GE(stack_height, stack_effect.first);
786         DCHECK_GE(kMaxUInt32, static_cast<uint64_t>(stack_height) -
787                                   stack_effect.first + stack_effect.second);
788         stack_height = stack_height - stack_effect.first + stack_effect.second;
789         if (stack_height > max_stack_height_) max_stack_height_ = stack_height;
790       }
791       switch (opcode) {
792         case kExprBlock:
793         case kExprLoop: {
794           bool is_loop = opcode == kExprLoop;
795           BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
796                                                        i.pc());
797           if (imm.type == kWasmVar) {
798             imm.sig = module->signatures[imm.sig_index];
799           }
800           TRACE("control @%u: %s, arity %d->%d\n", i.pc_offset(),
801                 is_loop ? "Loop" : "Block", imm.in_arity(), imm.out_arity());
802           CLabel* label =
803               CLabel::New(&control_transfer_zone, stack_height,
804                           is_loop ? imm.in_arity() : imm.out_arity());
805           control_stack.emplace_back(i.pc(), label, imm.out_arity());
806           copy_unreachable();
807           if (is_loop) label->Bind(i.pc());
808           break;
809         }
810         case kExprIf: {
811           BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
812                                                        i.pc());
813           if (imm.type == kWasmVar) {
814             imm.sig = module->signatures[imm.sig_index];
815           }
816           TRACE("control @%u: If, arity %d->%d\n", i.pc_offset(),
817                 imm.in_arity(), imm.out_arity());
818           CLabel* end_label = CLabel::New(&control_transfer_zone, stack_height,
819                                           imm.out_arity());
820           CLabel* else_label =
821               CLabel::New(&control_transfer_zone, stack_height, 0);
822           control_stack.emplace_back(i.pc(), end_label, else_label,
823                                      imm.out_arity());
824           copy_unreachable();
825           if (!unreachable) else_label->Ref(i.pc(), stack_height);
826           break;
827         }
828         case kExprElse: {
829           Control* c = &control_stack.back();
830           copy_unreachable();
831           TRACE("control @%u: Else\n", i.pc_offset());
832           if (!control_parent().unreachable) {
833             c->end_label->Ref(i.pc(), stack_height);
834           }
835           DCHECK_NOT_NULL(c->else_label);
836           c->else_label->Bind(i.pc() + 1);
837           c->else_label->Finish(&map_, code->orig_start);
838           c->else_label = nullptr;
839           DCHECK_GE(stack_height, c->end_label->target_stack_height);
840           stack_height = c->end_label->target_stack_height;
841           break;
842         }
843         case kExprEnd: {
844           Control* c = &control_stack.back();
845           TRACE("control @%u: End\n", i.pc_offset());
846           // Only loops have bound labels.
847           DCHECK_IMPLIES(c->end_label->target, *c->pc == kExprLoop);
848           if (!c->end_label->target) {
849             if (c->else_label) c->else_label->Bind(i.pc());
850             c->end_label->Bind(i.pc() + 1);
851           }
852           c->Finish(&map_, code->orig_start);
853           DCHECK_GE(stack_height, c->end_label->target_stack_height);
854           stack_height = c->end_label->target_stack_height + c->exit_arity;
855           control_stack.pop_back();
856           break;
857         }
858         case kExprBr: {
859           BreakDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
860           TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), imm.depth);
861           Control* c = &control_stack[control_stack.size() - imm.depth - 1];
862           if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
863           break;
864         }
865         case kExprBrIf: {
866           BreakDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
867           TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), imm.depth);
868           Control* c = &control_stack[control_stack.size() - imm.depth - 1];
869           if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
870           break;
871         }
872         case kExprBrTable: {
873           BranchTableImmediate<Decoder::kNoValidate> imm(&i, i.pc());
874           BranchTableIterator<Decoder::kNoValidate> iterator(&i, imm);
875           TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
876                 imm.table_count);
877           if (!unreachable) {
878             while (iterator.has_next()) {
879               uint32_t j = iterator.cur_index();
880               uint32_t target = iterator.next();
881               Control* c = &control_stack[control_stack.size() - target - 1];
882               c->end_label->Ref(i.pc() + j, stack_height);
883             }
884           }
885           break;
886         }
887         default:
888           break;
889       }
890       if (WasmOpcodes::IsUnconditionalJump(opcode)) {
891         control_stack.back().unreachable = true;
892       }
893     }
894     DCHECK_EQ(0, control_stack.size());
895     DCHECK_EQ(func_arity, stack_height);
896   }
897 
Lookup(pc_t from)898   ControlTransferEntry& Lookup(pc_t from) {
899     auto result = map_.find(from);
900     DCHECK(result != map_.end());
901     return result->second;
902   }
903 };
904 
905 struct ExternalCallResult {
906   enum Type {
907     // The function should be executed inside this interpreter.
908     INTERNAL,
909     // For indirect calls: Table or function does not exist.
910     INVALID_FUNC,
911     // For indirect calls: Signature does not match expected signature.
912     SIGNATURE_MISMATCH,
913     // The function was executed and returned normally.
914     EXTERNAL_RETURNED,
915     // The function was executed, threw an exception, and the stack was unwound.
916     EXTERNAL_UNWOUND
917   };
918   Type type;
919   // If type is INTERNAL, this field holds the function to call internally.
920   InterpreterCode* interpreter_code;
921 
ExternalCallResultv8::internal::wasm::__anon49171a620111::ExternalCallResult922   ExternalCallResult(Type type) : type(type) {  // NOLINT
923     DCHECK_NE(INTERNAL, type);
924   }
ExternalCallResultv8::internal::wasm::__anon49171a620111::ExternalCallResult925   ExternalCallResult(Type type, InterpreterCode* code)
926       : type(type), interpreter_code(code) {
927     DCHECK_EQ(INTERNAL, type);
928   }
929 };
930 
931 // The main storage for interpreter code. It maps {WasmFunction} to the
932 // metadata needed to execute each function.
933 class CodeMap {
934   Zone* zone_;
935   const WasmModule* module_;
936   ZoneVector<InterpreterCode> interpreter_code_;
937   // TODO(wasm): Remove this testing wart. It is needed because interpreter
938   // entry stubs are not generated in testing the interpreter in cctests.
939   bool call_indirect_through_module_ = false;
940 
941  public:
CodeMap(const WasmModule * module,const uint8_t * module_start,Zone * zone)942   CodeMap(const WasmModule* module, const uint8_t* module_start, Zone* zone)
943       : zone_(zone), module_(module), interpreter_code_(zone) {
944     if (module == nullptr) return;
945     interpreter_code_.reserve(module->functions.size());
946     for (const WasmFunction& function : module->functions) {
947       if (function.imported) {
948         DCHECK(!function.code.is_set());
949         AddFunction(&function, nullptr, nullptr);
950       } else {
951         AddFunction(&function, module_start + function.code.offset(),
952                     module_start + function.code.end_offset());
953       }
954     }
955   }
956 
call_indirect_through_module()957   bool call_indirect_through_module() { return call_indirect_through_module_; }
958 
set_call_indirect_through_module(bool val)959   void set_call_indirect_through_module(bool val) {
960     call_indirect_through_module_ = val;
961   }
962 
module() const963   const WasmModule* module() const { return module_; }
964 
GetCode(const WasmFunction * function)965   InterpreterCode* GetCode(const WasmFunction* function) {
966     InterpreterCode* code = GetCode(function->func_index);
967     DCHECK_EQ(function, code->function);
968     return code;
969   }
970 
GetCode(uint32_t function_index)971   InterpreterCode* GetCode(uint32_t function_index) {
972     DCHECK_LT(function_index, interpreter_code_.size());
973     return Preprocess(&interpreter_code_[function_index]);
974   }
975 
GetIndirectCode(uint32_t table_index,uint32_t entry_index)976   InterpreterCode* GetIndirectCode(uint32_t table_index, uint32_t entry_index) {
977     uint32_t saved_index;
978     USE(saved_index);
979     if (table_index >= module_->tables.size()) return nullptr;
980     // Mask table index for SSCA mitigation.
981     saved_index = table_index;
982     table_index &= static_cast<int32_t>((table_index - module_->tables.size()) &
983                                         ~static_cast<int32_t>(table_index)) >>
984                    31;
985     DCHECK_EQ(table_index, saved_index);
986     const WasmTable* table = &module_->tables[table_index];
987     if (entry_index >= table->values.size()) return nullptr;
988     // Mask entry_index for SSCA mitigation.
989     saved_index = entry_index;
990     entry_index &= static_cast<int32_t>((entry_index - table->values.size()) &
991                                         ~static_cast<int32_t>(entry_index)) >>
992                    31;
993     DCHECK_EQ(entry_index, saved_index);
994     uint32_t index = table->values[entry_index];
995     if (index >= interpreter_code_.size()) return nullptr;
996     // Mask index for SSCA mitigation.
997     saved_index = index;
998     index &= static_cast<int32_t>((index - interpreter_code_.size()) &
999                                   ~static_cast<int32_t>(index)) >>
1000              31;
1001     DCHECK_EQ(index, saved_index);
1002 
1003     return GetCode(index);
1004   }
1005 
Preprocess(InterpreterCode * code)1006   InterpreterCode* Preprocess(InterpreterCode* code) {
1007     DCHECK_EQ(code->function->imported, code->start == nullptr);
1008     if (!code->side_table && code->start) {
1009       // Compute the control targets map and the local declarations.
1010       code->side_table = new (zone_) SideTable(zone_, module_, code);
1011     }
1012     return code;
1013   }
1014 
AddFunction(const WasmFunction * function,const byte * code_start,const byte * code_end)1015   void AddFunction(const WasmFunction* function, const byte* code_start,
1016                    const byte* code_end) {
1017     InterpreterCode code = {
1018         function, BodyLocalDecls(zone_),         code_start,
1019         code_end, const_cast<byte*>(code_start), const_cast<byte*>(code_end),
1020         nullptr};
1021 
1022     DCHECK_EQ(interpreter_code_.size(), function->func_index);
1023     interpreter_code_.push_back(code);
1024   }
1025 
SetFunctionCode(const WasmFunction * function,const byte * start,const byte * end)1026   void SetFunctionCode(const WasmFunction* function, const byte* start,
1027                        const byte* end) {
1028     DCHECK_LT(function->func_index, interpreter_code_.size());
1029     InterpreterCode* code = &interpreter_code_[function->func_index];
1030     DCHECK_EQ(function, code->function);
1031     code->orig_start = start;
1032     code->orig_end = end;
1033     code->start = const_cast<byte*>(start);
1034     code->end = const_cast<byte*>(end);
1035     code->side_table = nullptr;
1036     Preprocess(code);
1037   }
1038 };
1039 
// Like a static_cast from src to dst, but specialized for boxed floats.
template <typename dst, typename src>
struct converter {
  dst operator()(src val) const { return static_cast<dst>(val); }
};
// Integer <-> boxed-float specializations convert via the raw bit pattern
// (FromBits / get_bits) instead of a value cast.
template <>
struct converter<Float64, uint64_t> {
  Float64 operator()(uint64_t val) const { return Float64::FromBits(val); }
};
template <>
struct converter<Float32, uint32_t> {
  Float32 operator()(uint32_t val) const { return Float32::FromBits(val); }
};
template <>
struct converter<uint64_t, Float64> {
  uint64_t operator()(Float64 val) const { return val.get_bits(); }
};
template <>
struct converter<uint32_t, Float32> {
  uint32_t operator()(Float32 val) const { return val.get_bits(); }
};
1061 
1062 template <typename T>
has_nondeterminism(T val)1063 V8_INLINE bool has_nondeterminism(T val) {
1064   static_assert(!std::is_floating_point<T>::value, "missing specialization");
1065   return false;
1066 }
1067 template <>
has_nondeterminism(float val)1068 V8_INLINE bool has_nondeterminism<float>(float val) {
1069   return std::isnan(val);
1070 }
1071 template <>
has_nondeterminism(double val)1072 V8_INLINE bool has_nondeterminism<double>(double val) {
1073   return std::isnan(val);
1074 }
1075 
1076 // Responsible for executing code directly.
1077 class ThreadImpl {
  // Marks the base of one activation: the frame count ({fp}) and the value
  // stack height ({sp}) at the time the activation was started.
  struct Activation {
    uint32_t fp;
    sp_t sp;
    Activation(uint32_t fp, sp_t sp) : fp(fp), sp(sp) {}
  };
1083 
1084  public:
  // The thread allocates its zone containers ({frames_}, {activations_}) in
  // the caller-provided {zone}.
  ThreadImpl(Zone* zone, CodeMap* codemap,
             Handle<WasmInstanceObject> instance_object)
      : codemap_(codemap),
        instance_object_(instance_object),
        frames_(zone),
        activations_(zone) {}
1091 
1092   //==========================================================================
1093   // Implementation of public interface for WasmInterpreter::Thread.
1094   //==========================================================================
1095 
state()1096   WasmInterpreter::State state() { return state_; }
1097 
  // Pushes {args} and an initial frame for {function}. Must be called at the
  // base of the current activation, i.e. before any frame was pushed for it.
  void InitFrame(const WasmFunction* function, WasmValue* args) {
    DCHECK_EQ(current_activation().fp, frames_.size());
    InterpreterCode* code = codemap()->GetCode(function);
    size_t num_params = function->sig->parameter_count();
    EnsureStackSpace(num_params);
    Push(args, num_params);
    PushFrame(code);
  }
1106 
  // Executes up to {num_steps} instructions starting at the topmost frame's
  // pc; {num_steps} == -1 means no step limit. Returns the resulting state.
  WasmInterpreter::State Run(int num_steps = -1) {
    DCHECK(state_ == WasmInterpreter::STOPPED ||
           state_ == WasmInterpreter::PAUSED);
    DCHECK(num_steps == -1 || num_steps > 0);
    if (num_steps == -1) {
      TRACE("  => Run()\n");
    } else if (num_steps == 1) {
      TRACE("  => Step()\n");
    } else {
      TRACE("  => Run(%d)\n", num_steps);
    }
    state_ = WasmInterpreter::RUNNING;
    Execute(frames_.back().code, frames_.back().pc, num_steps);
    // If state_ is STOPPED, the current activation must be fully unwound.
    DCHECK_IMPLIES(state_ == WasmInterpreter::STOPPED,
                   current_activation().fp == frames_.size());
    return state_;
  }
1125 
Pause()1126   void Pause() { UNIMPLEMENTED(); }
1127 
Reset()1128   void Reset() {
1129     TRACE("----- RESET -----\n");
1130     sp_ = stack_.get();
1131     frames_.clear();
1132     state_ = WasmInterpreter::STOPPED;
1133     trap_reason_ = kTrapCount;
1134     possible_nondeterminism_ = false;
1135   }
1136 
  // Number of frames currently on the frame stack, checked to fit in an int.
  int GetFrameCount() {
    DCHECK_GE(kMaxInt, frames_.size());
    return static_cast<int>(frames_.size());
  }
1141 
  // Returns the {index}th return value of the finished activation. After a
  // trap there are no meaningful results, so a 0xDEADBEEF placeholder is
  // returned instead.
  WasmValue GetReturnValue(uint32_t index) {
    if (state_ == WasmInterpreter::TRAPPED) return WasmValue(0xDEADBEEF);
    DCHECK_EQ(WasmInterpreter::FINISHED, state_);
    Activation act = current_activation();
    // Current activation must be finished.
    DCHECK_EQ(act.fp, frames_.size());
    return GetStackValue(act.sp + index);
  }
1150 
  // Reads the value stack slot at absolute {index} (must be below the top).
  WasmValue GetStackValue(sp_t index) {
    DCHECK_GT(StackHeight(), index);
    return stack_[index];
  }

  // Overwrites the value stack slot at absolute {index} (must be below the
  // top).
  void SetStackValue(sp_t index, WasmValue value) {
    DCHECK_GT(StackHeight(), index);
    stack_[index] = value;
  }
1160 
  // Reason for the last trap; kTrapCount if no trap occurred.
  TrapReason GetTrapReason() { return trap_reason_; }

  // pc of the last breakpoint that was hit; kInvalidPc if none.
  pc_t GetBreakpointPc() { return break_pc_; }

  // Whether execution may have been nondeterministic (cf. the
  // has_nondeterminism helpers above).
  bool PossibleNondeterminism() { return possible_nondeterminism_; }

  // Number of calls executed by this thread (incremented in PushFrame).
  uint64_t NumInterpretedCalls() { return num_interpreted_calls_; }

  // Adds/clears {WasmInterpreter::BreakFlag} bits controlling breaking.
  void AddBreakFlags(uint8_t flags) { break_flags_ |= flags; }

  void ClearBreakFlags() { break_flags_ = WasmInterpreter::BreakFlag::None; }
1172 
  // Number of currently open activations.
  uint32_t NumActivations() {
    return static_cast<uint32_t>(activations_.size());
  }

  // Opens a new activation on top of the current frame and value stacks and
  // returns its id (its index in {activations_}).
  uint32_t StartActivation() {
    TRACE("----- START ACTIVATION %zu -----\n", activations_.size());
    // If you use activations, use them consistently:
    DCHECK_IMPLIES(activations_.empty(), frames_.empty());
    DCHECK_IMPLIES(activations_.empty(), StackHeight() == 0);
    uint32_t activation_id = static_cast<uint32_t>(activations_.size());
    activations_.emplace_back(static_cast<uint32_t>(frames_.size()),
                              StackHeight());
    state_ = WasmInterpreter::STOPPED;
    return activation_id;
  }
1188 
  // Closes the topmost activation {id}, dropping its remaining stack values.
  // All of its frames must already have been popped or unwound.
  void FinishActivation(uint32_t id) {
    TRACE("----- FINISH ACTIVATION %zu -----\n", activations_.size() - 1);
    DCHECK_LT(0, activations_.size());
    DCHECK_EQ(activations_.size() - 1, id);
    // Stack height must match the start of this activation (otherwise unwind
    // first).
    DCHECK_EQ(activations_.back().fp, frames_.size());
    DCHECK_LE(activations_.back().sp, StackHeight());
    sp_ = stack_.get() + activations_.back().sp;
    activations_.pop_back();
  }
1200 
  // Index of the first frame belonging to activation {id}.
  uint32_t ActivationFrameBase(uint32_t id) {
    DCHECK_GT(activations_.size(), id);
    return activations_[id].fp;
  }
1205 
  // Handle a thrown exception. Returns whether the exception was handled inside
  // the current activation. Unwinds the interpreted stack accordingly.
  WasmInterpreter::Thread::ExceptionHandlingResult HandleException(
      Isolate* isolate) {
    DCHECK(isolate->has_pending_exception());
    // TODO(wasm): Add wasm exception handling (would return HANDLED).
    USE(isolate->pending_exception());
    TRACE("----- UNWIND -----\n");
    DCHECK_LT(0, activations_.size());
    Activation& act = activations_.back();
    // Drop all frames and stack values belonging to the current activation.
    DCHECK_LE(act.fp, frames_.size());
    frames_.resize(act.fp);
    DCHECK_LE(act.sp, StackHeight());
    sp_ = stack_.get() + act.sp;
    state_ = WasmInterpreter::STOPPED;
    return WasmInterpreter::Thread::UNWOUND;
  }
1223 
1224  private:
  // Entries on the stack of functions being evaluated.
  struct Frame {
    InterpreterCode* code;
    pc_t pc;  // Current position within {code}.
    sp_t sp;  // Base of this frame on the value stack; PushFrame sets it so
              // that the parameters start here.

    // Limit of parameters.
    sp_t plimit() { return sp + code->function->sig->parameter_count(); }
    // Limit of locals.
    sp_t llimit() { return plimit() + code->locals.type_list.size(); }
  };
1236 
  // Bookkeeping for one open control construct during execution.
  // NOTE(review): field semantics inferred from names; the code using {Block}
  // is not in this excerpt — confirm against Execute().
  struct Block {
    pc_t pc;
    sp_t sp;
    size_t fp;
    unsigned arity;
  };
1243 
  friend class InterpretedFrameImpl;

  CodeMap* codemap_;
  Handle<WasmInstanceObject> instance_object_;
  std::unique_ptr<WasmValue[]> stack_;  // Allocated value stack storage.
  WasmValue* stack_limit_ = nullptr;  // End of allocated stack space.
  WasmValue* sp_ = nullptr;           // Current stack pointer.
  ZoneVector<Frame> frames_;          // Stack of function frames.
  WasmInterpreter::State state_ = WasmInterpreter::STOPPED;
  pc_t break_pc_ = kInvalidPc;  // pc of the last breakpoint hit, if any.
  TrapReason trap_reason_ = kTrapCount;  // kTrapCount means "no trap".
  bool possible_nondeterminism_ = false;
  uint8_t break_flags_ = 0;  // a combination of WasmInterpreter::BreakFlag
  uint64_t num_interpreted_calls_ = 0;
  // Store the stack height of each activation (for unwind and frame
  // inspection).
  ZoneVector<Activation> activations_;
1261 
  // Shorthand accessors.
  CodeMap* codemap() const { return codemap_; }
  const WasmModule* module() const { return codemap_->module(); }
1264 
  // Records a trap: switches to TRAPPED state, remembers the reason, and
  // commits {pc} to the topmost frame.
  void DoTrap(TrapReason trap, pc_t pc) {
    TRACE("TRAP: %s\n", WasmOpcodes::TrapReasonMessage(trap));
    state_ = WasmInterpreter::TRAPPED;
    trap_reason_ = trap;
    CommitPc(pc);
  }
1271 
  // Push a frame with arguments already on the stack.
  void PushFrame(InterpreterCode* code) {
    DCHECK_NOT_NULL(code);
    DCHECK_NOT_NULL(code->side_table);
    // Ensure space for the precomputed worst-case stack height of this
    // function plus its locals.
    EnsureStackSpace(code->side_table->max_stack_height_ +
                     code->locals.type_list.size());

    ++num_interpreted_calls_;
    size_t arity = code->function->sig->parameter_count();
    // The parameters will overlap the arguments already on the stack.
    DCHECK_GE(StackHeight(), arity);
    frames_.push_back({code, 0, StackHeight() - arity});
    // InitLocals pushes zero-initialized locals and returns the pc just past
    // the local declarations.
    frames_.back().pc = InitLocals(code);
    TRACE("  => PushFrame #%zu (#%u @%zu)\n", frames_.size() - 1,
          code->function->func_index, frames_.back().pc);
  }
1288 
  // Pushes a zero-initialized value for each declared local and returns the
  // pc offset just past the encoded local declarations.
  pc_t InitLocals(InterpreterCode* code) {
    for (auto p : code->locals.type_list) {
      WasmValue val;
      switch (p) {
#define CASE_TYPE(wasm, ctype) \
  case kWasm##wasm:            \
    val = WasmValue(ctype{});  \
    break;
        WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
        default:
          UNREACHABLE();
          break;
      }
      Push(val);
    }
    return code->locals.encoded_size;
  }
1307 
  // Stores {pc} into the topmost frame so it reflects the current position.
  void CommitPc(pc_t pc) {
    DCHECK(!frames_.empty());
    frames_.back().pc = pc;
  }
1312 
SkipBreakpoint(InterpreterCode * code,pc_t pc)1313   bool SkipBreakpoint(InterpreterCode* code, pc_t pc) {
1314     if (pc == break_pc_) {
1315       // Skip the previously hit breakpoint when resuming.
1316       break_pc_ = kInvalidPc;
1317       return true;
1318     }
1319     return false;
1320   }
1321 
  // pc delta for the control transfer at {pc}, from the precomputed side
  // table.
  int LookupTargetDelta(InterpreterCode* code, pc_t pc) {
    return static_cast<int>(code->side_table->Lookup(pc).pc_diff);
  }
1325 
  // Executes a branch at {pc}: moves the branch's result values down the
  // stack as recorded in the side table and returns the pc delta to the
  // branch target.
  int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
    ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
    DoStackTransfer(sp_ - control_transfer_entry.sp_diff,
                    control_transfer_entry.target_arity);
    return control_transfer_entry.pc_diff;
  }
1332 
  // Computes the pc of the instruction following the call at {pc}, i.e. where
  // execution resumes in the caller once the callee returns.
  pc_t ReturnPc(Decoder* decoder, InterpreterCode* code, pc_t pc) {
    switch (code->orig_start[pc]) {
      case kExprCallFunction: {
        CallFunctionImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
        return pc + 1 + imm.length;
      }
      case kExprCallIndirect: {
        CallIndirectImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
        return pc + 1 + imm.length;
      }
      default:
        UNREACHABLE();
    }
  }
1347 
  // Pops the topmost frame, moving its {arity} return values down to the
  // frame's base. Returns false if this was the last frame of the current
  // activation (execution finished); otherwise updates {code}/{pc}/{limit}
  // to the caller's position and returns true.
  bool DoReturn(Decoder* decoder, InterpreterCode** code, pc_t* pc, pc_t* limit,
                size_t arity) {
    DCHECK_GT(frames_.size(), 0);
    WasmValue* sp_dest = stack_.get() + frames_.back().sp;
    frames_.pop_back();
    if (frames_.size() == current_activation().fp) {
      // A return from the last frame terminates the execution.
      state_ = WasmInterpreter::FINISHED;
      DoStackTransfer(sp_dest, arity);
      TRACE("  => finish\n");
      return false;
    } else {
      // Return to caller frame.
      Frame* top = &frames_.back();
      *code = top->code;
      decoder->Reset((*code)->start, (*code)->end);
      *pc = ReturnPc(decoder, *code, top->pc);
      *limit = top->code->end - top->code->start;
      TRACE("  => Return to #%zu (#%u @%zu)\n", frames_.size() - 1,
            (*code)->function->func_index, *pc);
      DoStackTransfer(sp_dest, arity);
      return true;
    }
  }
1372 
  // Returns true if the call was successful, false if the stack check failed
  // and the current activation was fully unwound.
  bool DoCall(Decoder* decoder, InterpreterCode* target, pc_t* pc,
              pc_t* limit) V8_WARN_UNUSED_RESULT {
    // Remember where to continue in the caller, then enter the callee.
    frames_.back().pc = *pc;
    PushFrame(target);
    if (!DoStackCheck()) return false;
    *pc = frames_.back().pc;
    *limit = target->end - target->start;
    decoder->Reset(target->start, target->end);
    return true;
  }
1385 
  // Copies {arity} values on the top of the stack down the stack to {dest},
  // dropping the values in-between.
  void DoStackTransfer(WasmValue* dest, size_t arity) {
    // before: |---------------| pop_count | arity |
    //         ^ 0             ^ dest              ^ sp_
    //
    // after:  |---------------| arity |
    //         ^ 0                     ^ sp_
    DCHECK_LE(dest, sp_);
    DCHECK_LE(dest + arity, sp_);
    // memmove because the source and destination ranges may overlap.
    if (arity) memmove(dest, sp_ - arity, arity * sizeof(*sp_));
    sp_ = dest + arity;
  }
1399 
  // Bounds-checks an access of sizeof(mtype) bytes at {offset}+{index} and
  // returns its effective address, or kNullAddress if out of bounds. The
  // three checks are ordered so the subtractions cannot underflow.
  template <typename mtype>
  inline Address BoundsCheckMem(uint32_t offset, uint32_t index) {
    size_t mem_size = instance_object_->memory_size();
    if (sizeof(mtype) > mem_size) return kNullAddress;
    if (offset > (mem_size - sizeof(mtype))) return kNullAddress;
    if (index > (mem_size - sizeof(mtype) - offset)) return kNullAddress;
    // Compute the effective address of the access, making sure to condition
    // the index even in the in-bounds case.
    return reinterpret_cast<Address>(instance_object_->memory_start()) +
           offset + (index & instance_object_->memory_mask());
  }
1411 
  // Loads an {mtype} from memory, converts it to {ctype}, and pushes it on
  // the stack. On an out-of-bounds access, traps and returns false. {len}
  // receives the total instruction length.
  template <typename ctype, typename mtype>
  bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
                   MachineRepresentation rep) {
    MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
                                                    sizeof(ctype));
    uint32_t index = Pop().to<uint32_t>();
    Address addr = BoundsCheckMem<mtype>(imm.offset, index);
    if (!addr) {
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
    WasmValue result(
        converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));

    Push(result);
    len = 1 + imm.length;

    if (FLAG_wasm_trace_memory) {
      // "false" marks this access as a load in the trace output.
      MemoryTracingInfo info(imm.offset + index, false, rep);
      TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
                           code->function->func_index, static_cast<int>(pc),
                           instance_object_->memory_start());
    }

    return true;
  }
1438 
  // Pops a {ctype} value and an index, converts the value to {mtype}, and
  // stores it to memory. On an out-of-bounds access, traps and returns
  // false. {len} receives the total instruction length.
  template <typename ctype, typename mtype>
  bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
                    MachineRepresentation rep) {
    MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
                                                    sizeof(ctype));
    ctype val = Pop().to<ctype>();

    uint32_t index = Pop().to<uint32_t>();
    Address addr = BoundsCheckMem<mtype>(imm.offset, index);
    if (!addr) {
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
    WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
    len = 1 + imm.length;

    if (FLAG_wasm_trace_memory) {
      // "true" marks this access as a store in the trace output.
      MemoryTracingInfo info(imm.offset + index, true, rep);
      TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
                           code->function->func_index, static_cast<int>(pc),
                           instance_object_->memory_start());
    }

    return true;
  }
1464 
  // Pops the operands of an atomic operation ({val2}, then {val}, then the
  // memory index) and bounds-checks the access. On success, {address}
  // receives the effective address and {len} the instruction length; on an
  // out-of-bounds access, traps and returns false.
  template <typename type, typename op_type>
  bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
                             Address& address, pc_t pc, int& len,
                             type* val = nullptr, type* val2 = nullptr) {
    // The immediate starts at pc+1: atomic opcodes carry a one-byte prefix
    // (hence also the "2 +" in the length below).
    MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 1),
                                                    sizeof(type));
    if (val2) *val2 = static_cast<type>(Pop().to<op_type>());
    if (val) *val = static_cast<type>(Pop().to<op_type>());
    uint32_t index = Pop().to<uint32_t>();
    address = BoundsCheckMem<type>(imm.offset, index);
    if (!address) {
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
    len = 2 + imm.length;
    return true;
  }
1482 
  // Executes one opcode of the numeric prefix group (saturating float ->
  // integer conversions). Returns true on success; unknown opcodes are fatal.
  bool ExecuteNumericOp(WasmOpcode opcode, Decoder* decoder,
                        InterpreterCode* code, pc_t pc, int& len) {
    switch (opcode) {
      case kExprI32SConvertSatF32:
        Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<float>())));
        return true;
      case kExprI32UConvertSatF32:
        Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<float>())));
        return true;
      case kExprI32SConvertSatF64:
        Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<double>())));
        return true;
      case kExprI32UConvertSatF64:
        Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<double>())));
        return true;
      case kExprI64SConvertSatF32:
        Push(WasmValue(ExecuteI64SConvertSatF32(Pop().to<float>())));
        return true;
      case kExprI64UConvertSatF32:
        Push(WasmValue(ExecuteI64UConvertSatF32(Pop().to<float>())));
        return true;
      case kExprI64SConvertSatF64:
        Push(WasmValue(ExecuteI64SConvertSatF64(Pop().to<double>())));
        return true;
      case kExprI64UConvertSatF64:
        Push(WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
        return true;
      default:
        FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
              OpcodeName(code->start[pc]));
        UNREACHABLE();
    }
    // Not reachable: every case above returns, and the default is fatal.
    return false;
  }
1517 
  // Executes an opcode from the "atomic" prefix (shared-memory atomics).
  // Operand extraction, bounds checking, and instruction-length computation
  // are delegated to ExtractAtomicOpParams; the memory operation itself is
  // performed via the corresponding std::atomic free function on the
  // (suitably reinterpreted) linear-memory address. Returns false iff the
  // access trapped out-of-bounds.
  bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
                       InterpreterCode* code, pc_t pc, int& len) {
    WasmValue result;
    switch (opcode) {
// Disabling on Mips as 32 bit atomics are not correctly laid out for load/store
// on big endian and 64 bit atomics fail to compile.
#if !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN)
// Read-modify-write ops (add/sub/and/or/xor/exchange): pop one operand,
// apply std::|operation| atomically at the checked address, and push the
// previous memory value widened back to |op_type|. The static_assert
// guarantees that casting memory to std::atomic<type> is layout-safe.
#define ATOMIC_BINOP_CASE(name, type, op_type, operation)                   \
  case kExpr##name: {                                                       \
    type val;                                                               \
    Address addr;                                                           \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
                                              &val)) {                      \
      return false;                                                         \
    }                                                                       \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                \
                  "Size mismatch for types std::atomic<" #type              \
                  ">, and " #type);                                         \
    result = WasmValue(static_cast<op_type>(                                \
        std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)));  \
    Push(result);                                                           \
    break;                                                                  \
  }
      ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, uint32_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, uint32_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, uint32_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, uint32_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, uint32_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, uint32_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, uint32_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, uint32_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, uint32_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, uint32_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, uint32_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, uint32_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, uint32_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, uint32_t, atomic_exchange);
      ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, uint32_t,
                        atomic_exchange);
      ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, uint32_t,
                        atomic_exchange);
      ATOMIC_BINOP_CASE(I64AtomicAdd, uint64_t, uint64_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I64AtomicAdd8U, uint8_t, uint64_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I64AtomicAdd16U, uint16_t, uint64_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I64AtomicAdd32U, uint32_t, uint64_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I64AtomicSub, uint64_t, uint64_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I64AtomicSub8U, uint8_t, uint64_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I64AtomicSub16U, uint16_t, uint64_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I64AtomicSub32U, uint32_t, uint64_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I64AtomicAnd, uint64_t, uint64_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I64AtomicAnd8U, uint8_t, uint64_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I64AtomicAnd16U, uint16_t, uint64_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I64AtomicAnd32U, uint32_t, uint64_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I64AtomicOr, uint64_t, uint64_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I64AtomicOr8U, uint8_t, uint64_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I64AtomicOr16U, uint16_t, uint64_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I64AtomicOr32U, uint32_t, uint64_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I64AtomicXor, uint64_t, uint64_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I64AtomicXor8U, uint8_t, uint64_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I64AtomicXor16U, uint16_t, uint64_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I64AtomicXor32U, uint32_t, uint64_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I64AtomicExchange, uint64_t, uint64_t, atomic_exchange);
      ATOMIC_BINOP_CASE(I64AtomicExchange8U, uint8_t, uint64_t,
                        atomic_exchange);
      ATOMIC_BINOP_CASE(I64AtomicExchange16U, uint16_t, uint64_t,
                        atomic_exchange);
      ATOMIC_BINOP_CASE(I64AtomicExchange32U, uint32_t, uint64_t,
                        atomic_exchange);
#undef ATOMIC_BINOP_CASE
// Compare-exchange: pops expected (val) and replacement (val2), attempts the
// strong CAS, and pushes the value observed in memory (val is updated in
// place by atomic_compare_exchange_strong on failure).
#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type)                   \
  case kExpr##name: {                                                       \
    type val;                                                               \
    type val2;                                                              \
    Address addr;                                                           \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
                                              &val, &val2)) {               \
      return false;                                                         \
    }                                                                       \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                \
                  "Size mismatch for types std::atomic<" #type              \
                  ">, and " #type);                                         \
    std::atomic_compare_exchange_strong(                                    \
        reinterpret_cast<std::atomic<type>*>(addr), &val, val2);            \
    Push(WasmValue(static_cast<op_type>(val)));                             \
    break;                                                                  \
  }
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t,
                                   uint32_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange8U, uint8_t,
                                   uint32_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange16U, uint16_t,
                                   uint32_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange, uint64_t,
                                   uint64_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange8U, uint8_t,
                                   uint64_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange16U, uint16_t,
                                   uint64_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange32U, uint32_t,
                                   uint64_t);
#undef ATOMIC_COMPARE_EXCHANGE_CASE
// Atomic load: no value operands (only the index is popped); pushes the
// loaded value zero-extended to |op_type|.
#define ATOMIC_LOAD_CASE(name, type, op_type, operation)                       \
  case kExpr##name: {                                                          \
    Address addr;                                                              \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len)) { \
      return false;                                                            \
    }                                                                          \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                   \
                  "Size mismatch for types std::atomic<" #type                 \
                  ">, and " #type);                                            \
    result = WasmValue(static_cast<op_type>(                                   \
        std::operation(reinterpret_cast<std::atomic<type>*>(addr))));          \
    Push(result);                                                              \
    break;                                                                     \
  }
      ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, uint32_t, atomic_load);
      ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, uint32_t, atomic_load);
      ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, uint32_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad, uint64_t, uint64_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad8U, uint8_t, uint64_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad16U, uint16_t, uint64_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad32U, uint32_t, uint64_t, atomic_load);
#undef ATOMIC_LOAD_CASE
// Atomic store: pops the value to store (truncated to |type|); pushes
// nothing.
#define ATOMIC_STORE_CASE(name, type, op_type, operation)                   \
  case kExpr##name: {                                                       \
    type val;                                                               \
    Address addr;                                                           \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
                                              &val)) {                      \
      return false;                                                         \
    }                                                                       \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                \
                  "Size mismatch for types std::atomic<" #type              \
                  ">, and " #type);                                         \
    std::operation(reinterpret_cast<std::atomic<type>*>(addr), val);        \
    break;                                                                  \
  }
      ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, uint32_t, atomic_store);
      ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, uint32_t, atomic_store);
      ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, uint32_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore, uint64_t, uint64_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore8U, uint8_t, uint64_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
#undef ATOMIC_STORE_CASE
#endif  // !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN)
      default:
        // Reached for any non-atomic opcode, or for all atomic opcodes when
        // the cases above are compiled out on big-endian MIPS.
        UNREACHABLE();
        return false;
    }
    return true;
  }
1672 
GetGlobalPtr(const WasmGlobal * global)1673   byte* GetGlobalPtr(const WasmGlobal* global) {
1674     if (global->mutability && global->imported) {
1675       return reinterpret_cast<byte*>(
1676           instance_object_->imported_mutable_globals()[global->index]);
1677     } else {
1678       return instance_object_->globals_start() + global->offset;
1679     }
1680   }
1681 
ExecuteSimdOp(WasmOpcode opcode,Decoder * decoder,InterpreterCode * code,pc_t pc,int & len)1682   bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
1683                      pc_t pc, int& len) {
1684     switch (opcode) {
1685 #define SPLAT_CASE(format, sType, valType, num) \
1686   case kExpr##format##Splat: {                  \
1687     WasmValue val = Pop();                      \
1688     valType v = val.to<valType>();              \
1689     sType s;                                    \
1690     for (int i = 0; i < num; i++) s.val[i] = v; \
1691     Push(WasmValue(Simd128(s)));                \
1692     return true;                                \
1693   }
1694       SPLAT_CASE(I32x4, int4, int32_t, 4)
1695       SPLAT_CASE(F32x4, float4, float, 4)
1696       SPLAT_CASE(I16x8, int8, int32_t, 8)
1697       SPLAT_CASE(I8x16, int16, int32_t, 16)
1698 #undef SPLAT_CASE
1699 #define EXTRACT_LANE_CASE(format, name)                                 \
1700   case kExpr##format##ExtractLane: {                                    \
1701     SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
1702     ++len;                                                              \
1703     WasmValue val = Pop();                                              \
1704     Simd128 s = val.to_s128();                                          \
1705     auto ss = s.to_##name();                                            \
1706     Push(WasmValue(ss.val[LANE(imm.lane, ss)]));                        \
1707     return true;                                                        \
1708   }
1709       EXTRACT_LANE_CASE(I32x4, i32x4)
1710       EXTRACT_LANE_CASE(F32x4, f32x4)
1711       EXTRACT_LANE_CASE(I16x8, i16x8)
1712       EXTRACT_LANE_CASE(I8x16, i8x16)
1713 #undef EXTRACT_LANE_CASE
1714 #define BINOP_CASE(op, name, stype, count, expr) \
1715   case kExpr##op: {                              \
1716     WasmValue v2 = Pop();                        \
1717     WasmValue v1 = Pop();                        \
1718     stype s1 = v1.to_s128().to_##name();         \
1719     stype s2 = v2.to_s128().to_##name();         \
1720     stype res;                                   \
1721     for (size_t i = 0; i < count; ++i) {         \
1722       auto a = s1.val[LANE(i, s1)];              \
1723       auto b = s2.val[LANE(i, s1)];              \
1724       res.val[LANE(i, s1)] = expr;               \
1725     }                                            \
1726     Push(WasmValue(Simd128(res)));               \
1727     return true;                                 \
1728   }
1729       BINOP_CASE(F32x4Add, f32x4, float4, 4, a + b)
1730       BINOP_CASE(F32x4Sub, f32x4, float4, 4, a - b)
1731       BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
1732       BINOP_CASE(F32x4Min, f32x4, float4, 4, a < b ? a : b)
1733       BINOP_CASE(F32x4Max, f32x4, float4, 4, a > b ? a : b)
1734       BINOP_CASE(I32x4Add, i32x4, int4, 4, a + b)
1735       BINOP_CASE(I32x4Sub, i32x4, int4, 4, a - b)
1736       BINOP_CASE(I32x4Mul, i32x4, int4, 4, a * b)
1737       BINOP_CASE(I32x4MinS, i32x4, int4, 4, a < b ? a : b)
1738       BINOP_CASE(I32x4MinU, i32x4, int4, 4,
1739                  static_cast<uint32_t>(a) < static_cast<uint32_t>(b) ? a : b)
1740       BINOP_CASE(I32x4MaxS, i32x4, int4, 4, a > b ? a : b)
1741       BINOP_CASE(I32x4MaxU, i32x4, int4, 4,
1742                  static_cast<uint32_t>(a) > static_cast<uint32_t>(b) ? a : b)
1743       BINOP_CASE(S128And, i32x4, int4, 4, a & b)
1744       BINOP_CASE(S128Or, i32x4, int4, 4, a | b)
1745       BINOP_CASE(S128Xor, i32x4, int4, 4, a ^ b)
1746       BINOP_CASE(I16x8Add, i16x8, int8, 8, a + b)
1747       BINOP_CASE(I16x8Sub, i16x8, int8, 8, a - b)
1748       BINOP_CASE(I16x8Mul, i16x8, int8, 8, a * b)
1749       BINOP_CASE(I16x8MinS, i16x8, int8, 8, a < b ? a : b)
1750       BINOP_CASE(I16x8MinU, i16x8, int8, 8,
1751                  static_cast<uint16_t>(a) < static_cast<uint16_t>(b) ? a : b)
1752       BINOP_CASE(I16x8MaxS, i16x8, int8, 8, a > b ? a : b)
1753       BINOP_CASE(I16x8MaxU, i16x8, int8, 8,
1754                  static_cast<uint16_t>(a) > static_cast<uint16_t>(b) ? a : b)
1755       BINOP_CASE(I16x8AddSaturateS, i16x8, int8, 8, SaturateAdd<int16_t>(a, b))
1756       BINOP_CASE(I16x8AddSaturateU, i16x8, int8, 8, SaturateAdd<uint16_t>(a, b))
1757       BINOP_CASE(I16x8SubSaturateS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
1758       BINOP_CASE(I16x8SubSaturateU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
1759       BINOP_CASE(I8x16Add, i8x16, int16, 16, a + b)
1760       BINOP_CASE(I8x16Sub, i8x16, int16, 16, a - b)
1761       BINOP_CASE(I8x16Mul, i8x16, int16, 16, a * b)
1762       BINOP_CASE(I8x16MinS, i8x16, int16, 16, a < b ? a : b)
1763       BINOP_CASE(I8x16MinU, i8x16, int16, 16,
1764                  static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
1765       BINOP_CASE(I8x16MaxS, i8x16, int16, 16, a > b ? a : b)
1766       BINOP_CASE(I8x16MaxU, i8x16, int16, 16,
1767                  static_cast<uint8_t>(a) > static_cast<uint8_t>(b) ? a : b)
1768       BINOP_CASE(I8x16AddSaturateS, i8x16, int16, 16, SaturateAdd<int8_t>(a, b))
1769       BINOP_CASE(I8x16AddSaturateU, i8x16, int16, 16,
1770                  SaturateAdd<uint8_t>(a, b))
1771       BINOP_CASE(I8x16SubSaturateS, i8x16, int16, 16, SaturateSub<int8_t>(a, b))
1772       BINOP_CASE(I8x16SubSaturateU, i8x16, int16, 16,
1773                  SaturateSub<uint8_t>(a, b))
1774 #undef BINOP_CASE
1775 #define UNOP_CASE(op, name, stype, count, expr) \
1776   case kExpr##op: {                             \
1777     WasmValue v = Pop();                        \
1778     stype s = v.to_s128().to_##name();          \
1779     stype res;                                  \
1780     for (size_t i = 0; i < count; ++i) {        \
1781       auto a = s.val[i];                        \
1782       res.val[i] = expr;                        \
1783     }                                           \
1784     Push(WasmValue(Simd128(res)));              \
1785     return true;                                \
1786   }
1787       UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
1788       UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
1789       UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, 1.0f / a)
1790       UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, 1.0f / std::sqrt(a))
1791       UNOP_CASE(I32x4Neg, i32x4, int4, 4, -a)
1792       UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
1793       UNOP_CASE(I16x8Neg, i16x8, int8, 8, -a)
1794       UNOP_CASE(I8x16Neg, i8x16, int16, 16, -a)
1795 #undef UNOP_CASE
1796 #define CMPOP_CASE(op, name, stype, out_stype, count, expr) \
1797   case kExpr##op: {                                         \
1798     WasmValue v2 = Pop();                                   \
1799     WasmValue v1 = Pop();                                   \
1800     stype s1 = v1.to_s128().to_##name();                    \
1801     stype s2 = v2.to_s128().to_##name();                    \
1802     out_stype res;                                          \
1803     for (size_t i = 0; i < count; ++i) {                    \
1804       auto a = s1.val[i];                                   \
1805       auto b = s2.val[i];                                   \
1806       res.val[i] = expr ? -1 : 0;                           \
1807     }                                                       \
1808     Push(WasmValue(Simd128(res)));                          \
1809     return true;                                            \
1810   }
1811       CMPOP_CASE(F32x4Eq, f32x4, float4, int4, 4, a == b)
1812       CMPOP_CASE(F32x4Ne, f32x4, float4, int4, 4, a != b)
1813       CMPOP_CASE(F32x4Gt, f32x4, float4, int4, 4, a > b)
1814       CMPOP_CASE(F32x4Ge, f32x4, float4, int4, 4, a >= b)
1815       CMPOP_CASE(F32x4Lt, f32x4, float4, int4, 4, a < b)
1816       CMPOP_CASE(F32x4Le, f32x4, float4, int4, 4, a <= b)
1817       CMPOP_CASE(I32x4Eq, i32x4, int4, int4, 4, a == b)
1818       CMPOP_CASE(I32x4Ne, i32x4, int4, int4, 4, a != b)
1819       CMPOP_CASE(I32x4GtS, i32x4, int4, int4, 4, a > b)
1820       CMPOP_CASE(I32x4GeS, i32x4, int4, int4, 4, a >= b)
1821       CMPOP_CASE(I32x4LtS, i32x4, int4, int4, 4, a < b)
1822       CMPOP_CASE(I32x4LeS, i32x4, int4, int4, 4, a <= b)
1823       CMPOP_CASE(I32x4GtU, i32x4, int4, int4, 4,
1824                  static_cast<uint32_t>(a) > static_cast<uint32_t>(b))
1825       CMPOP_CASE(I32x4GeU, i32x4, int4, int4, 4,
1826                  static_cast<uint32_t>(a) >= static_cast<uint32_t>(b))
1827       CMPOP_CASE(I32x4LtU, i32x4, int4, int4, 4,
1828                  static_cast<uint32_t>(a) < static_cast<uint32_t>(b))
1829       CMPOP_CASE(I32x4LeU, i32x4, int4, int4, 4,
1830                  static_cast<uint32_t>(a) <= static_cast<uint32_t>(b))
1831       CMPOP_CASE(I16x8Eq, i16x8, int8, int8, 8, a == b)
1832       CMPOP_CASE(I16x8Ne, i16x8, int8, int8, 8, a != b)
1833       CMPOP_CASE(I16x8GtS, i16x8, int8, int8, 8, a > b)
1834       CMPOP_CASE(I16x8GeS, i16x8, int8, int8, 8, a >= b)
1835       CMPOP_CASE(I16x8LtS, i16x8, int8, int8, 8, a < b)
1836       CMPOP_CASE(I16x8LeS, i16x8, int8, int8, 8, a <= b)
1837       CMPOP_CASE(I16x8GtU, i16x8, int8, int8, 8,
1838                  static_cast<uint16_t>(a) > static_cast<uint16_t>(b))
1839       CMPOP_CASE(I16x8GeU, i16x8, int8, int8, 8,
1840                  static_cast<uint16_t>(a) >= static_cast<uint16_t>(b))
1841       CMPOP_CASE(I16x8LtU, i16x8, int8, int8, 8,
1842                  static_cast<uint16_t>(a) < static_cast<uint16_t>(b))
1843       CMPOP_CASE(I16x8LeU, i16x8, int8, int8, 8,
1844                  static_cast<uint16_t>(a) <= static_cast<uint16_t>(b))
1845       CMPOP_CASE(I8x16Eq, i8x16, int16, int16, 16, a == b)
1846       CMPOP_CASE(I8x16Ne, i8x16, int16, int16, 16, a != b)
1847       CMPOP_CASE(I8x16GtS, i8x16, int16, int16, 16, a > b)
1848       CMPOP_CASE(I8x16GeS, i8x16, int16, int16, 16, a >= b)
1849       CMPOP_CASE(I8x16LtS, i8x16, int16, int16, 16, a < b)
1850       CMPOP_CASE(I8x16LeS, i8x16, int16, int16, 16, a <= b)
1851       CMPOP_CASE(I8x16GtU, i8x16, int16, int16, 16,
1852                  static_cast<uint8_t>(a) > static_cast<uint8_t>(b))
1853       CMPOP_CASE(I8x16GeU, i8x16, int16, int16, 16,
1854                  static_cast<uint8_t>(a) >= static_cast<uint8_t>(b))
1855       CMPOP_CASE(I8x16LtU, i8x16, int16, int16, 16,
1856                  static_cast<uint8_t>(a) < static_cast<uint8_t>(b))
1857       CMPOP_CASE(I8x16LeU, i8x16, int16, int16, 16,
1858                  static_cast<uint8_t>(a) <= static_cast<uint8_t>(b))
1859 #undef CMPOP_CASE
1860 #define REPLACE_LANE_CASE(format, name, stype, ctype)                   \
1861   case kExpr##format##ReplaceLane: {                                    \
1862     SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
1863     ++len;                                                              \
1864     WasmValue new_val = Pop();                                          \
1865     WasmValue simd_val = Pop();                                         \
1866     stype s = simd_val.to_s128().to_##name();                           \
1867     s.val[LANE(imm.lane, s)] = new_val.to<ctype>();                     \
1868     Push(WasmValue(Simd128(s)));                                        \
1869     return true;                                                        \
1870   }
1871       REPLACE_LANE_CASE(F32x4, f32x4, float4, float)
1872       REPLACE_LANE_CASE(I32x4, i32x4, int4, int32_t)
1873       REPLACE_LANE_CASE(I16x8, i16x8, int8, int32_t)
1874       REPLACE_LANE_CASE(I8x16, i8x16, int16, int32_t)
1875 #undef REPLACE_LANE_CASE
1876       case kExprS128LoadMem:
1877         return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len,
1878                                              MachineRepresentation::kSimd128);
1879       case kExprS128StoreMem:
1880         return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
1881                                               MachineRepresentation::kSimd128);
1882 #define SHIFT_CASE(op, name, stype, count, expr)                         \
1883   case kExpr##op: {                                                      \
1884     SimdShiftImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
1885     ++len;                                                               \
1886     WasmValue v = Pop();                                                 \
1887     stype s = v.to_s128().to_##name();                                   \
1888     stype res;                                                           \
1889     for (size_t i = 0; i < count; ++i) {                                 \
1890       auto a = s.val[i];                                                 \
1891       res.val[i] = expr;                                                 \
1892     }                                                                    \
1893     Push(WasmValue(Simd128(res)));                                       \
1894     return true;                                                         \
1895   }
1896         SHIFT_CASE(I32x4Shl, i32x4, int4, 4, a << imm.shift)
1897         SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> imm.shift)
1898         SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
1899                    static_cast<uint32_t>(a) >> imm.shift)
1900         SHIFT_CASE(I16x8Shl, i16x8, int8, 8, a << imm.shift)
1901         SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> imm.shift)
1902         SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
1903                    static_cast<uint16_t>(a) >> imm.shift)
1904         SHIFT_CASE(I8x16Shl, i8x16, int16, 16, a << imm.shift)
1905         SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> imm.shift)
1906         SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
1907                    static_cast<uint8_t>(a) >> imm.shift)
1908 #undef SHIFT_CASE
1909 #define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \
1910                      expr)                                                    \
1911   case kExpr##op: {                                                           \
1912     WasmValue v = Pop();                                                      \
1913     src_type s = v.to_s128().to_##name();                                     \
1914     dst_type res;                                                             \
1915     for (size_t i = 0; i < count; ++i) {                                      \
1916       ctype a = s.val[LANE(start_index + i, s)];                              \
1917       res.val[LANE(i, res)] = expr;                                           \
1918     }                                                                         \
1919     Push(WasmValue(Simd128(res)));                                            \
1920     return true;                                                              \
1921   }
1922         CONVERT_CASE(F32x4SConvertI32x4, int4, i32x4, float4, 4, 0, int32_t,
1923                      static_cast<float>(a))
1924         CONVERT_CASE(F32x4UConvertI32x4, int4, i32x4, float4, 4, 0, uint32_t,
1925                      static_cast<float>(a))
1926         CONVERT_CASE(I32x4SConvertF32x4, float4, f32x4, int4, 4, 0, double,
1927                      std::isnan(a) ? 0
1928                                    : a<kMinInt ? kMinInt : a> kMaxInt
1929                                          ? kMaxInt
1930                                          : static_cast<int32_t>(a))
1931         CONVERT_CASE(I32x4UConvertF32x4, float4, f32x4, int4, 4, 0, double,
1932                      std::isnan(a)
1933                          ? 0
1934                          : a<0 ? 0 : a> kMaxUInt32 ? kMaxUInt32
1935                                                    : static_cast<uint32_t>(a))
1936         CONVERT_CASE(I32x4SConvertI16x8High, int8, i16x8, int4, 4, 4, int16_t,
1937                      a)
1938         CONVERT_CASE(I32x4UConvertI16x8High, int8, i16x8, int4, 4, 4, uint16_t,
1939                      a)
1940         CONVERT_CASE(I32x4SConvertI16x8Low, int8, i16x8, int4, 4, 0, int16_t, a)
1941         CONVERT_CASE(I32x4UConvertI16x8Low, int8, i16x8, int4, 4, 0, uint16_t,
1942                      a)
1943         CONVERT_CASE(I16x8SConvertI8x16High, int16, i8x16, int8, 8, 8, int8_t,
1944                      a)
1945         CONVERT_CASE(I16x8UConvertI8x16High, int16, i8x16, int8, 8, 8, uint8_t,
1946                      a)
1947         CONVERT_CASE(I16x8SConvertI8x16Low, int16, i8x16, int8, 8, 0, int8_t, a)
1948         CONVERT_CASE(I16x8UConvertI8x16Low, int16, i8x16, int8, 8, 0, uint8_t,
1949                      a)
1950 #undef CONVERT_CASE
1951 #define PACK_CASE(op, src_type, name, dst_type, count, ctype, dst_ctype,   \
1952                   is_unsigned)                                             \
1953   case kExpr##op: {                                                        \
1954     WasmValue v2 = Pop();                                                  \
1955     WasmValue v1 = Pop();                                                  \
1956     src_type s1 = v1.to_s128().to_##name();                                \
1957     src_type s2 = v2.to_s128().to_##name();                                \
1958     dst_type res;                                                          \
1959     int64_t min = std::numeric_limits<ctype>::min();                       \
1960     int64_t max = std::numeric_limits<ctype>::max();                       \
1961     for (size_t i = 0; i < count; ++i) {                                   \
1962       int32_t v = i < count / 2 ? s1.val[LANE(i, s1)]                      \
1963                                 : s2.val[LANE(i - count / 2, s2)];         \
1964       int64_t a = is_unsigned ? static_cast<int64_t>(v & 0xFFFFFFFFu) : v; \
1965       res.val[LANE(i, res)] =                                              \
1966           static_cast<dst_ctype>(std::max(min, std::min(max, a)));         \
1967     }                                                                      \
1968     Push(WasmValue(Simd128(res)));                                         \
1969     return true;                                                           \
1970   }
1971         PACK_CASE(I16x8SConvertI32x4, int4, i32x4, int8, 8, int16_t, int16_t,
1972                   false)
1973         PACK_CASE(I16x8UConvertI32x4, int4, i32x4, int8, 8, uint16_t, int16_t,
1974                   true)
1975         PACK_CASE(I8x16SConvertI16x8, int8, i16x8, int16, 16, int8_t, int8_t,
1976                   false)
1977         PACK_CASE(I8x16UConvertI16x8, int8, i16x8, int16, 16, uint8_t, int8_t,
1978                   true)
1979 #undef PACK_CASE
1980       case kExprS128Select: {
1981         int4 v2 = Pop().to_s128().to_i32x4();
1982         int4 v1 = Pop().to_s128().to_i32x4();
1983         int4 bool_val = Pop().to_s128().to_i32x4();
1984         int4 res;
1985         for (size_t i = 0; i < 4; ++i) {
1986           res.val[i] = v2.val[i] ^ ((v1.val[i] ^ v2.val[i]) & bool_val.val[i]);
1987         }
1988         Push(WasmValue(Simd128(res)));
1989         return true;
1990       }
1991 #define ADD_HORIZ_CASE(op, name, stype, count)                   \
1992   case kExpr##op: {                                              \
1993     WasmValue v2 = Pop();                                        \
1994     WasmValue v1 = Pop();                                        \
1995     stype s1 = v1.to_s128().to_##name();                         \
1996     stype s2 = v2.to_s128().to_##name();                         \
1997     stype res;                                                   \
1998     for (size_t i = 0; i < count / 2; ++i) {                     \
1999       res.val[LANE(i, s1)] =                                     \
2000           s1.val[LANE(i * 2, s1)] + s1.val[LANE(i * 2 + 1, s1)]; \
2001       res.val[LANE(i + count / 2, s1)] =                         \
2002           s2.val[LANE(i * 2, s1)] + s2.val[LANE(i * 2 + 1, s1)]; \
2003     }                                                            \
2004     Push(WasmValue(Simd128(res)));                               \
2005     return true;                                                 \
2006   }
2007         ADD_HORIZ_CASE(I32x4AddHoriz, i32x4, int4, 4)
2008         ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4)
2009         ADD_HORIZ_CASE(I16x8AddHoriz, i16x8, int8, 8)
2010 #undef ADD_HORIZ_CASE
2011       case kExprS8x16Shuffle: {
2012         Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(decoder,
2013                                                            code->at(pc));
2014         len += 16;
2015         int16 v2 = Pop().to_s128().to_i8x16();
2016         int16 v1 = Pop().to_s128().to_i8x16();
2017         int16 res;
2018         for (size_t i = 0; i < kSimd128Size; ++i) {
2019           int lane = imm.shuffle[i];
2020           res.val[LANE(i, v1)] = lane < kSimd128Size
2021                                      ? v1.val[LANE(lane, v1)]
2022                                      : v2.val[LANE(lane - kSimd128Size, v1)];
2023         }
2024         Push(WasmValue(Simd128(res)));
2025         return true;
2026       }
2027 #define REDUCTION_CASE(op, name, stype, count, operation) \
2028   case kExpr##op: {                                       \
2029     stype s = Pop().to_s128().to_##name();                \
2030     int32_t res = s.val[0];                               \
2031     for (size_t i = 1; i < count; ++i) {                  \
2032       res = res operation static_cast<int32_t>(s.val[i]); \
2033     }                                                     \
2034     Push(WasmValue(res));                                 \
2035     return true;                                          \
2036   }
2037         REDUCTION_CASE(S1x4AnyTrue, i32x4, int4, 4, |)
2038         REDUCTION_CASE(S1x4AllTrue, i32x4, int4, 4, &)
2039         REDUCTION_CASE(S1x8AnyTrue, i16x8, int8, 8, |)
2040         REDUCTION_CASE(S1x8AllTrue, i16x8, int8, 8, &)
2041         REDUCTION_CASE(S1x16AnyTrue, i8x16, int16, 16, |)
2042         REDUCTION_CASE(S1x16AllTrue, i8x16, int16, 16, &)
2043 #undef REDUCTION_CASE
2044       default:
2045         return false;
2046     }
2047   }
2048 
2049   // Check if our control stack (frames_) exceeds the limit. Trigger stack
2050   // overflow if it does, and unwinding the current frame.
2051   // Returns true if execution can continue, false if the current activation was
2052   // fully unwound.
2053   // Do call this function immediately *after* pushing a new frame. The pc of
2054   // the top frame will be reset to 0 if the stack check fails.
DoStackCheck()2055   bool DoStackCheck() V8_WARN_UNUSED_RESULT {
2056     // The goal of this stack check is not to prevent actual stack overflows,
2057     // but to simulate stack overflows during the execution of compiled code.
2058     // That is why this function uses FLAG_stack_size, even though the value
2059     // stack actually lies in zone memory.
2060     const size_t stack_size_limit = FLAG_stack_size * KB;
2061     // Sum up the value stack size and the control stack size.
2062     const size_t current_stack_size =
2063         (sp_ - stack_.get()) + frames_.size() * sizeof(Frame);
2064     if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
2065       return true;
2066     }
2067     // The pc of the top frame is initialized to the first instruction. We reset
2068     // it to 0 here such that we report the same position as in compiled code.
2069     frames_.back().pc = 0;
2070     Isolate* isolate = instance_object_->GetIsolate();
2071     HandleScope handle_scope(isolate);
2072     isolate->StackOverflow();
2073     return HandleException(isolate) == WasmInterpreter::Thread::HANDLED;
2074   }
2075 
Execute(InterpreterCode * code,pc_t pc,int max)2076   void Execute(InterpreterCode* code, pc_t pc, int max) {
2077     DCHECK_NOT_NULL(code->side_table);
2078     DCHECK(!frames_.empty());
2079     // There must be enough space on the stack to hold the arguments, locals,
2080     // and the value stack.
2081     DCHECK_LE(code->function->sig->parameter_count() +
2082                   code->locals.type_list.size() +
2083                   code->side_table->max_stack_height_,
2084               stack_limit_ - stack_.get() - frames_.back().sp);
2085 
2086     Decoder decoder(code->start, code->end);
2087     pc_t limit = code->end - code->start;
2088     bool hit_break = false;
2089 
2090     while (true) {
2091 #define PAUSE_IF_BREAK_FLAG(flag)                                     \
2092   if (V8_UNLIKELY(break_flags_ & WasmInterpreter::BreakFlag::flag)) { \
2093     hit_break = true;                                                 \
2094     max = 0;                                                          \
2095   }
2096 
2097       DCHECK_GT(limit, pc);
2098       DCHECK_NOT_NULL(code->start);
2099 
2100       // Do first check for a breakpoint, in order to set hit_break correctly.
2101       const char* skip = "        ";
2102       int len = 1;
2103       byte orig = code->start[pc];
2104       WasmOpcode opcode = static_cast<WasmOpcode>(orig);
2105       if (WasmOpcodes::IsPrefixOpcode(opcode)) {
2106         opcode = static_cast<WasmOpcode>(opcode << 8 | code->start[pc + 1]);
2107       }
2108       if (V8_UNLIKELY(orig == kInternalBreakpoint)) {
2109         orig = code->orig_start[pc];
2110         if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(orig))) {
2111           opcode =
2112               static_cast<WasmOpcode>(orig << 8 | code->orig_start[pc + 1]);
2113         }
2114         if (SkipBreakpoint(code, pc)) {
2115           // skip breakpoint by switching on original code.
2116           skip = "[skip]  ";
2117         } else {
2118           TRACE("@%-3zu: [break] %-24s:", pc, WasmOpcodes::OpcodeName(opcode));
2119           TraceValueStack();
2120           TRACE("\n");
2121           hit_break = true;
2122           break;
2123         }
2124       }
2125 
2126       // If max is 0, break. If max is positive (a limit is set), decrement it.
2127       if (max == 0) break;
2128       if (max > 0) --max;
2129 
2130       USE(skip);
2131       TRACE("@%-3zu: %s%-24s:", pc, skip, WasmOpcodes::OpcodeName(opcode));
2132       TraceValueStack();
2133       TRACE("\n");
2134 
2135 #ifdef DEBUG
2136       // Compute the stack effect of this opcode, and verify later that the
2137       // stack was modified accordingly.
2138       std::pair<uint32_t, uint32_t> stack_effect =
2139           StackEffect(codemap_->module(), frames_.back().code->function->sig,
2140                       code->orig_start + pc, code->orig_end);
2141       sp_t expected_new_stack_height =
2142           StackHeight() - stack_effect.first + stack_effect.second;
2143 #endif
2144 
2145       switch (orig) {
2146         case kExprNop:
2147           break;
2148         case kExprBlock: {
2149           BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures,
2150                                                        &decoder, code->at(pc));
2151           len = 1 + imm.length;
2152           break;
2153         }
2154         case kExprLoop: {
2155           BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures,
2156                                                        &decoder, code->at(pc));
2157           len = 1 + imm.length;
2158           break;
2159         }
2160         case kExprIf: {
2161           BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures,
2162                                                        &decoder, code->at(pc));
2163           WasmValue cond = Pop();
2164           bool is_true = cond.to<uint32_t>() != 0;
2165           if (is_true) {
2166             // fall through to the true block.
2167             len = 1 + imm.length;
2168             TRACE("  true => fallthrough\n");
2169           } else {
2170             len = LookupTargetDelta(code, pc);
2171             TRACE("  false => @%zu\n", pc + len);
2172           }
2173           break;
2174         }
2175         case kExprElse: {
2176           len = LookupTargetDelta(code, pc);
2177           TRACE("  end => @%zu\n", pc + len);
2178           break;
2179         }
2180         case kExprSelect: {
2181           WasmValue cond = Pop();
2182           WasmValue fval = Pop();
2183           WasmValue tval = Pop();
2184           Push(cond.to<int32_t>() != 0 ? tval : fval);
2185           break;
2186         }
2187         case kExprBr: {
2188           BreakDepthImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2189           len = DoBreak(code, pc, imm.depth);
2190           TRACE("  br => @%zu\n", pc + len);
2191           break;
2192         }
2193         case kExprBrIf: {
2194           BreakDepthImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2195           WasmValue cond = Pop();
2196           bool is_true = cond.to<uint32_t>() != 0;
2197           if (is_true) {
2198             len = DoBreak(code, pc, imm.depth);
2199             TRACE("  br_if => @%zu\n", pc + len);
2200           } else {
2201             TRACE("  false => fallthrough\n");
2202             len = 1 + imm.length;
2203           }
2204           break;
2205         }
2206         case kExprBrTable: {
2207           BranchTableImmediate<Decoder::kNoValidate> imm(&decoder,
2208                                                          code->at(pc));
2209           BranchTableIterator<Decoder::kNoValidate> iterator(&decoder, imm);
2210           uint32_t key = Pop().to<uint32_t>();
2211           uint32_t depth = 0;
2212           if (key >= imm.table_count) key = imm.table_count;
2213           for (uint32_t i = 0; i <= key; i++) {
2214             DCHECK(iterator.has_next());
2215             depth = iterator.next();
2216           }
2217           len = key + DoBreak(code, pc + key, static_cast<size_t>(depth));
2218           TRACE("  br[%u] => @%zu\n", key, pc + key + len);
2219           break;
2220         }
2221         case kExprReturn: {
2222           size_t arity = code->function->sig->return_count();
2223           if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
2224           PAUSE_IF_BREAK_FLAG(AfterReturn);
2225           continue;
2226         }
2227         case kExprUnreachable: {
2228           return DoTrap(kTrapUnreachable, pc);
2229         }
2230         case kExprEnd: {
2231           break;
2232         }
2233         case kExprI32Const: {
2234           ImmI32Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2235           Push(WasmValue(imm.value));
2236           len = 1 + imm.length;
2237           break;
2238         }
2239         case kExprI64Const: {
2240           ImmI64Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2241           Push(WasmValue(imm.value));
2242           len = 1 + imm.length;
2243           break;
2244         }
2245         case kExprF32Const: {
2246           ImmF32Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2247           Push(WasmValue(imm.value));
2248           len = 1 + imm.length;
2249           break;
2250         }
2251         case kExprF64Const: {
2252           ImmF64Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2253           Push(WasmValue(imm.value));
2254           len = 1 + imm.length;
2255           break;
2256         }
2257         case kExprGetLocal: {
2258           LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2259           Push(GetStackValue(frames_.back().sp + imm.index));
2260           len = 1 + imm.length;
2261           break;
2262         }
2263         case kExprSetLocal: {
2264           LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2265           WasmValue val = Pop();
2266           SetStackValue(frames_.back().sp + imm.index, val);
2267           len = 1 + imm.length;
2268           break;
2269         }
2270         case kExprTeeLocal: {
2271           LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2272           WasmValue val = Pop();
2273           SetStackValue(frames_.back().sp + imm.index, val);
2274           Push(val);
2275           len = 1 + imm.length;
2276           break;
2277         }
2278         case kExprDrop: {
2279           Pop();
2280           break;
2281         }
2282         case kExprCallFunction: {
2283           CallFunctionImmediate<Decoder::kNoValidate> imm(&decoder,
2284                                                           code->at(pc));
2285           InterpreterCode* target = codemap()->GetCode(imm.index);
2286           if (target->function->imported) {
2287             CommitPc(pc);
2288             ExternalCallResult result =
2289                 CallImportedFunction(target->function->func_index);
2290             switch (result.type) {
2291               case ExternalCallResult::INTERNAL:
2292                 // The import is a function of this instance. Call it directly.
2293                 target = result.interpreter_code;
2294                 DCHECK(!target->function->imported);
2295                 break;
2296               case ExternalCallResult::INVALID_FUNC:
2297               case ExternalCallResult::SIGNATURE_MISMATCH:
2298                 // Direct calls are checked statically.
2299                 UNREACHABLE();
2300               case ExternalCallResult::EXTERNAL_RETURNED:
2301                 PAUSE_IF_BREAK_FLAG(AfterCall);
2302                 len = 1 + imm.length;
2303                 break;
2304               case ExternalCallResult::EXTERNAL_UNWOUND:
2305                 return;
2306             }
2307             if (result.type != ExternalCallResult::INTERNAL) break;
2308           }
2309           // Execute an internal call.
2310           if (!DoCall(&decoder, target, &pc, &limit)) return;
2311           code = target;
2312           PAUSE_IF_BREAK_FLAG(AfterCall);
2313           continue;  // don't bump pc
2314         } break;
2315         case kExprCallIndirect: {
2316           CallIndirectImmediate<Decoder::kNoValidate> imm(&decoder,
2317                                                           code->at(pc));
2318           uint32_t entry_index = Pop().to<uint32_t>();
2319           // Assume only one table for now.
2320           DCHECK_LE(module()->tables.size(), 1u);
2321           CommitPc(pc);  // TODO(wasm): Be more disciplined about committing PC.
2322           ExternalCallResult result =
2323               CallIndirectFunction(0, entry_index, imm.sig_index);
2324           switch (result.type) {
2325             case ExternalCallResult::INTERNAL:
2326               // The import is a function of this instance. Call it directly.
2327               if (!DoCall(&decoder, result.interpreter_code, &pc, &limit))
2328                 return;
2329               code = result.interpreter_code;
2330               PAUSE_IF_BREAK_FLAG(AfterCall);
2331               continue;  // don't bump pc
2332             case ExternalCallResult::INVALID_FUNC:
2333               return DoTrap(kTrapFuncInvalid, pc);
2334             case ExternalCallResult::SIGNATURE_MISMATCH:
2335               return DoTrap(kTrapFuncSigMismatch, pc);
2336             case ExternalCallResult::EXTERNAL_RETURNED:
2337               PAUSE_IF_BREAK_FLAG(AfterCall);
2338               len = 1 + imm.length;
2339               break;
2340             case ExternalCallResult::EXTERNAL_UNWOUND:
2341               return;
2342           }
2343         } break;
2344         case kExprGetGlobal: {
2345           GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
2346                                                          code->at(pc));
2347           const WasmGlobal* global = &module()->globals[imm.index];
2348           byte* ptr = GetGlobalPtr(global);
2349           WasmValue val;
2350           switch (global->type) {
2351 #define CASE_TYPE(wasm, ctype)                                         \
2352   case kWasm##wasm:                                                    \
2353     val = WasmValue(                                                   \
2354         ReadLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr))); \
2355     break;
2356             WASM_CTYPES(CASE_TYPE)
2357 #undef CASE_TYPE
2358             default:
2359               UNREACHABLE();
2360           }
2361           Push(val);
2362           len = 1 + imm.length;
2363           break;
2364         }
2365         case kExprSetGlobal: {
2366           GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
2367                                                          code->at(pc));
2368           const WasmGlobal* global = &module()->globals[imm.index];
2369           byte* ptr = GetGlobalPtr(global);
2370           WasmValue val = Pop();
2371           switch (global->type) {
2372 #define CASE_TYPE(wasm, ctype)                                    \
2373   case kWasm##wasm:                                               \
2374     WriteLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr), \
2375                                   val.to<ctype>());               \
2376     break;
2377             WASM_CTYPES(CASE_TYPE)
2378 #undef CASE_TYPE
2379             default:
2380               UNREACHABLE();
2381           }
2382           len = 1 + imm.length;
2383           break;
2384         }
2385 
2386 #define LOAD_CASE(name, ctype, mtype, rep)                      \
2387   case kExpr##name: {                                           \
2388     if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len,     \
2389                                    MachineRepresentation::rep)) \
2390       return;                                                   \
2391     break;                                                      \
2392   }
2393 
2394           LOAD_CASE(I32LoadMem8S, int32_t, int8_t, kWord8);
2395           LOAD_CASE(I32LoadMem8U, int32_t, uint8_t, kWord8);
2396           LOAD_CASE(I32LoadMem16S, int32_t, int16_t, kWord16);
2397           LOAD_CASE(I32LoadMem16U, int32_t, uint16_t, kWord16);
2398           LOAD_CASE(I64LoadMem8S, int64_t, int8_t, kWord8);
2399           LOAD_CASE(I64LoadMem8U, int64_t, uint8_t, kWord16);
2400           LOAD_CASE(I64LoadMem16S, int64_t, int16_t, kWord16);
2401           LOAD_CASE(I64LoadMem16U, int64_t, uint16_t, kWord16);
2402           LOAD_CASE(I64LoadMem32S, int64_t, int32_t, kWord32);
2403           LOAD_CASE(I64LoadMem32U, int64_t, uint32_t, kWord32);
2404           LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32);
2405           LOAD_CASE(I64LoadMem, int64_t, int64_t, kWord64);
2406           LOAD_CASE(F32LoadMem, Float32, uint32_t, kFloat32);
2407           LOAD_CASE(F64LoadMem, Float64, uint64_t, kFloat64);
2408 #undef LOAD_CASE
2409 
2410 #define STORE_CASE(name, ctype, mtype, rep)                      \
2411   case kExpr##name: {                                            \
2412     if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len,     \
2413                                     MachineRepresentation::rep)) \
2414       return;                                                    \
2415     break;                                                       \
2416   }
2417 
2418           STORE_CASE(I32StoreMem8, int32_t, int8_t, kWord8);
2419           STORE_CASE(I32StoreMem16, int32_t, int16_t, kWord16);
2420           STORE_CASE(I64StoreMem8, int64_t, int8_t, kWord8);
2421           STORE_CASE(I64StoreMem16, int64_t, int16_t, kWord16);
2422           STORE_CASE(I64StoreMem32, int64_t, int32_t, kWord32);
2423           STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32);
2424           STORE_CASE(I64StoreMem, int64_t, int64_t, kWord64);
2425           STORE_CASE(F32StoreMem, Float32, uint32_t, kFloat32);
2426           STORE_CASE(F64StoreMem, Float64, uint64_t, kFloat64);
2427 #undef STORE_CASE
2428 
2429 #define ASMJS_LOAD_CASE(name, ctype, mtype, defval)                 \
2430   case kExpr##name: {                                               \
2431     uint32_t index = Pop().to<uint32_t>();                          \
2432     ctype result;                                                   \
2433     Address addr = BoundsCheckMem<mtype>(0, index);                 \
2434     if (!addr) {                                                    \
2435       result = defval;                                              \
2436     } else {                                                        \
2437       /* TODO(titzer): alignment for asmjs load mem? */             \
2438       result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
2439     }                                                               \
2440     Push(WasmValue(result));                                        \
2441     break;                                                          \
2442   }
2443           ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
2444           ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
2445           ASMJS_LOAD_CASE(I32AsmjsLoadMem16S, int32_t, int16_t, 0);
2446           ASMJS_LOAD_CASE(I32AsmjsLoadMem16U, int32_t, uint16_t, 0);
2447           ASMJS_LOAD_CASE(I32AsmjsLoadMem, int32_t, int32_t, 0);
2448           ASMJS_LOAD_CASE(F32AsmjsLoadMem, float, float,
2449                           std::numeric_limits<float>::quiet_NaN());
2450           ASMJS_LOAD_CASE(F64AsmjsLoadMem, double, double,
2451                           std::numeric_limits<double>::quiet_NaN());
2452 #undef ASMJS_LOAD_CASE
2453 
2454 #define ASMJS_STORE_CASE(name, ctype, mtype)                                   \
2455   case kExpr##name: {                                                          \
2456     WasmValue val = Pop();                                                     \
2457     uint32_t index = Pop().to<uint32_t>();                                     \
2458     Address addr = BoundsCheckMem<mtype>(0, index);                            \
2459     if (addr) {                                                                \
2460       *(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
2461     }                                                                          \
2462     Push(val);                                                                 \
2463     break;                                                                     \
2464   }
2465 
2466           ASMJS_STORE_CASE(I32AsmjsStoreMem8, int32_t, int8_t);
2467           ASMJS_STORE_CASE(I32AsmjsStoreMem16, int32_t, int16_t);
2468           ASMJS_STORE_CASE(I32AsmjsStoreMem, int32_t, int32_t);
2469           ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
2470           ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
2471 #undef ASMJS_STORE_CASE
2472         case kExprGrowMemory: {
2473           MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
2474                                                          code->at(pc));
2475           uint32_t delta_pages = Pop().to<uint32_t>();
2476           Handle<WasmMemoryObject> memory(instance_object_->memory_object(),
2477                                           instance_object_->GetIsolate());
2478           Isolate* isolate = memory->GetIsolate();
2479           int32_t result = WasmMemoryObject::Grow(isolate, memory, delta_pages);
2480           Push(WasmValue(result));
2481           len = 1 + imm.length;
2482           // Treat one grow_memory instruction like 1000 other instructions,
2483           // because it is a really expensive operation.
2484           if (max > 0) max = std::max(0, max - 1000);
2485           break;
2486         }
2487         case kExprMemorySize: {
2488           MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
2489                                                          code->at(pc));
2490           Push(WasmValue(static_cast<uint32_t>(instance_object_->memory_size() /
2491                                                kWasmPageSize)));
2492           len = 1 + imm.length;
2493           break;
2494         }
2495         // We need to treat kExprI32ReinterpretF32 and kExprI64ReinterpretF64
2496         // specially to guarantee that the quiet bit of a NaN is preserved on
2497         // ia32 by the reinterpret casts.
2498         case kExprI32ReinterpretF32: {
2499           WasmValue val = Pop();
2500           Push(WasmValue(ExecuteI32ReinterpretF32(val)));
2501           break;
2502         }
2503         case kExprI64ReinterpretF64: {
2504           WasmValue val = Pop();
2505           Push(WasmValue(ExecuteI64ReinterpretF64(val)));
2506           break;
2507         }
2508 #define SIGN_EXTENSION_CASE(name, wtype, ntype)        \
2509   case kExpr##name: {                                  \
2510     ntype val = static_cast<ntype>(Pop().to<wtype>()); \
2511     Push(WasmValue(static_cast<wtype>(val)));          \
2512     break;                                             \
2513   }
2514           SIGN_EXTENSION_CASE(I32SExtendI8, int32_t, int8_t);
2515           SIGN_EXTENSION_CASE(I32SExtendI16, int32_t, int16_t);
2516           SIGN_EXTENSION_CASE(I64SExtendI8, int64_t, int8_t);
2517           SIGN_EXTENSION_CASE(I64SExtendI16, int64_t, int16_t);
2518           SIGN_EXTENSION_CASE(I64SExtendI32, int64_t, int32_t);
2519 #undef SIGN_EXTENSION_CASE
2520         case kNumericPrefix: {
2521           ++len;
2522           if (!ExecuteNumericOp(opcode, &decoder, code, pc, len)) return;
2523           break;
2524         }
2525         case kAtomicPrefix: {
2526           if (!ExecuteAtomicOp(opcode, &decoder, code, pc, len)) return;
2527           break;
2528         }
2529         case kSimdPrefix: {
2530           ++len;
2531           if (!ExecuteSimdOp(opcode, &decoder, code, pc, len)) return;
2532           break;
2533         }
2534 
2535 #define EXECUTE_SIMPLE_BINOP(name, ctype, op)               \
2536   case kExpr##name: {                                       \
2537     WasmValue rval = Pop();                                 \
2538     WasmValue lval = Pop();                                 \
2539     auto result = lval.to<ctype>() op rval.to<ctype>();     \
2540     possible_nondeterminism_ |= has_nondeterminism(result); \
2541     Push(WasmValue(result));                                \
2542     break;                                                  \
2543   }
2544           FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
2545 #undef EXECUTE_SIMPLE_BINOP
2546 
2547 #define EXECUTE_OTHER_BINOP(name, ctype)                    \
2548   case kExpr##name: {                                       \
2549     TrapReason trap = kTrapCount;                           \
2550     ctype rval = Pop().to<ctype>();                         \
2551     ctype lval = Pop().to<ctype>();                         \
2552     auto result = Execute##name(lval, rval, &trap);         \
2553     possible_nondeterminism_ |= has_nondeterminism(result); \
2554     if (trap != kTrapCount) return DoTrap(trap, pc);        \
2555     Push(WasmValue(result));                                \
2556     break;                                                  \
2557   }
2558           FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
2559 #undef EXECUTE_OTHER_BINOP
2560 
2561 #define EXECUTE_UNOP(name, ctype, exec_fn)                  \
2562   case kExpr##name: {                                       \
2563     TrapReason trap = kTrapCount;                           \
2564     ctype val = Pop().to<ctype>();                          \
2565     auto result = exec_fn(val, &trap);                      \
2566     possible_nondeterminism_ |= has_nondeterminism(result); \
2567     if (trap != kTrapCount) return DoTrap(trap, pc);        \
2568     Push(WasmValue(result));                                \
2569     break;                                                  \
2570   }
2571 
2572 #define EXECUTE_OTHER_UNOP(name, ctype) EXECUTE_UNOP(name, ctype, Execute##name)
2573           FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
2574 #undef EXECUTE_OTHER_UNOP
2575 
2576 #define EXECUTE_I32CONV_FLOATOP(name, out_type, in_type) \
2577   EXECUTE_UNOP(name, in_type, ExecuteConvert<out_type>)
2578           FOREACH_I32CONV_FLOATOP(EXECUTE_I32CONV_FLOATOP)
2579 #undef EXECUTE_I32CONV_FLOATOP
2580 #undef EXECUTE_UNOP
2581 
2582         default:
2583           FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
2584                 OpcodeName(code->start[pc]));
2585           UNREACHABLE();
2586       }
2587 
2588 #ifdef DEBUG
2589       if (!WasmOpcodes::IsControlOpcode(opcode)) {
2590         DCHECK_EQ(expected_new_stack_height, StackHeight());
2591       }
2592 #endif
2593 
2594       pc += len;
2595       if (pc == limit) {
2596         // Fell off end of code; do an implicit return.
2597         TRACE("@%-3zu: ImplicitReturn\n", pc);
2598         if (!DoReturn(&decoder, &code, &pc, &limit,
2599                       code->function->sig->return_count()))
2600           return;
2601         PAUSE_IF_BREAK_FLAG(AfterReturn);
2602       }
2603 #undef PAUSE_IF_BREAK_FLAG
2604     }
2605 
2606     state_ = WasmInterpreter::PAUSED;
2607     break_pc_ = hit_break ? pc : kInvalidPc;
2608     CommitPc(pc);
2609   }
2610 
Pop()2611   WasmValue Pop() {
2612     DCHECK_GT(frames_.size(), 0);
2613     DCHECK_GT(StackHeight(), frames_.back().llimit());  // can't pop into locals
2614     return *--sp_;
2615   }
2616 
PopN(int n)2617   void PopN(int n) {
2618     DCHECK_GE(StackHeight(), n);
2619     DCHECK_GT(frames_.size(), 0);
2620     // Check that we don't pop into locals.
2621     DCHECK_GE(StackHeight() - n, frames_.back().llimit());
2622     sp_ -= n;
2623   }
2624 
PopArity(size_t arity)2625   WasmValue PopArity(size_t arity) {
2626     if (arity == 0) return WasmValue();
2627     CHECK_EQ(1, arity);
2628     return Pop();
2629   }
2630 
Push(WasmValue val)2631   void Push(WasmValue val) {
2632     DCHECK_NE(kWasmStmt, val.type());
2633     DCHECK_LE(1, stack_limit_ - sp_);
2634     *sp_++ = val;
2635   }
2636 
Push(WasmValue * vals,size_t arity)2637   void Push(WasmValue* vals, size_t arity) {
2638     DCHECK_LE(arity, stack_limit_ - sp_);
2639     for (WasmValue *val = vals, *end = vals + arity; val != end; ++val) {
2640       DCHECK_NE(kWasmStmt, val->type());
2641     }
2642     memcpy(sp_, vals, arity * sizeof(*sp_));
2643     sp_ += arity;
2644   }
2645 
EnsureStackSpace(size_t size)2646   void EnsureStackSpace(size_t size) {
2647     if (V8_LIKELY(static_cast<size_t>(stack_limit_ - sp_) >= size)) return;
2648     size_t old_size = stack_limit_ - stack_.get();
2649     size_t requested_size =
2650         base::bits::RoundUpToPowerOfTwo64((sp_ - stack_.get()) + size);
2651     size_t new_size = Max(size_t{8}, Max(2 * old_size, requested_size));
2652     std::unique_ptr<WasmValue[]> new_stack(new WasmValue[new_size]);
2653     memcpy(new_stack.get(), stack_.get(), old_size * sizeof(*sp_));
2654     sp_ = new_stack.get() + (sp_ - stack_.get());
2655     stack_ = std::move(new_stack);
2656     stack_limit_ = stack_.get() + new_size;
2657   }
2658 
StackHeight()2659   sp_t StackHeight() { return sp_ - stack_.get(); }
2660 
  // Prints the value stack of the topmost frame when
  // --trace-wasm-interpreter is enabled. Slots are tagged 'p' (parameter),
  // 'l' (local) or 's' (operand stack) based on the frame's limits.
  // Debug-only; compiles to an empty body in release builds.
  void TraceValueStack() {
#ifdef DEBUG
    if (!FLAG_trace_wasm_interpreter) return;
    Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
    sp_t sp = top ? top->sp : 0;
    sp_t plimit = top ? top->plimit() : 0;
    sp_t llimit = top ? top->llimit() : 0;
    for (size_t i = sp; i < StackHeight(); ++i) {
      if (i < plimit)
        PrintF(" p%zu:", i);
      else if (i < llimit)
        PrintF(" l%zu:", i);
      else
        PrintF(" s%zu:", i);
      WasmValue val = GetStackValue(i);
      switch (val.type()) {
        case kWasmI32:
          PrintF("i32:%d", val.to<int32_t>());
          break;
        case kWasmI64:
          PrintF("i64:%" PRId64 "", val.to<int64_t>());
          break;
        case kWasmF32:
          PrintF("f32:%f", val.to<float>());
          break;
        case kWasmF64:
          PrintF("f64:%lf", val.to<double>());
          break;
        case kWasmStmt:
          PrintF("void");
          break;
        default:
          UNREACHABLE();
          break;
      }
    }
#endif  // DEBUG
  }
2699 
TryHandleException(Isolate * isolate)2700   ExternalCallResult TryHandleException(Isolate* isolate) {
2701     if (HandleException(isolate) == WasmInterpreter::Thread::UNWOUND) {
2702       return {ExternalCallResult::EXTERNAL_UNWOUND};
2703     }
2704     return {ExternalCallResult::EXTERNAL_RETURNED};
2705   }
2706 
  // Calls a wasm function that cannot be executed directly by this
  // interpreter (e.g. a wasm-to-JS wrapper, or code of another instance)
  // through the C-wasm entry stub. Arguments are read from the interpreter
  // stack into a byte buffer; afterwards the arguments are popped and the
  // single return value (if any) is pushed back.
  ExternalCallResult CallExternalWasmFunction(
      Isolate* isolate, Handle<WasmInstanceObject> instance,
      const WasmCode* code, FunctionSig* sig) {
    // Signatures that cannot cross the JS boundary cause a TypeError when
    // called through a wasm-to-JS wrapper.
    if (code->kind() == WasmCode::kWasmToJsWrapper &&
        !IsJSCompatibleSignature(sig)) {
      isolate->Throw(*isolate->factory()->NewTypeError(
          MessageTemplate::kWasmTrapTypeError));
      return TryHandleException(isolate);
    }

    Handle<WasmDebugInfo> debug_info(instance_object_->debug_info(), isolate);
    Handle<JSFunction> wasm_entry =
        WasmDebugInfo::GetCWasmEntry(debug_info, sig);

    TRACE("  => Calling external wasm function\n");

    // Copy the arguments to one buffer.
    // TODO(clemensh): Introduce a helper for all argument buffer
    // con-/destruction.
    int num_args = static_cast<int>(sig->parameter_count());
    std::vector<uint8_t> arg_buffer(num_args * 8);
    size_t offset = 0;
    WasmValue* wasm_args = sp_ - num_args;
    for (int i = 0; i < num_args; ++i) {
      int param_size = ValueTypes::ElementSizeInBytes(sig->GetParam(i));
      // Grow the buffer if the 8-bytes-per-arg estimate was too small.
      if (arg_buffer.size() < offset + param_size) {
        arg_buffer.resize(std::max(2 * arg_buffer.size(), offset + param_size));
      }
      Address address = reinterpret_cast<Address>(arg_buffer.data()) + offset;
      switch (sig->GetParam(i)) {
        case kWasmI32:
          WriteUnalignedValue(address, wasm_args[i].to<uint32_t>());
          break;
        case kWasmI64:
          WriteUnalignedValue(address, wasm_args[i].to<uint64_t>());
          break;
        case kWasmF32:
          WriteUnalignedValue(address, wasm_args[i].to<float>());
          break;
        case kWasmF64:
          WriteUnalignedValue(address, wasm_args[i].to<double>());
          break;
        default:
          UNIMPLEMENTED();
      }
      offset += param_size;
    }

    // Ensure that there is enough space in the arg_buffer to hold the return
    // value(s).
    size_t return_size = 0;
    for (ValueType t : sig->returns()) {
      return_size += ValueTypes::ElementSizeInBytes(t);
    }
    if (arg_buffer.size() < return_size) {
      arg_buffer.resize(return_size);
    }

    // Wrap the arg_buffer data pointer in a handle. As
    // this is an aligned pointer, to the GC it will look like a Smi.
    Handle<Object> arg_buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
                                  isolate);
    DCHECK(!arg_buffer_obj->IsHeapObject());

    static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
                  "code below needs adaption");
    Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
    args[compiler::CWasmEntryParameters::kCodeObject] = Handle<Object>::cast(
        isolate->factory()->NewForeign(code->instruction_start(), TENURED));
    args[compiler::CWasmEntryParameters::kWasmInstance] = instance;
    args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;

    Handle<Object> receiver = isolate->factory()->undefined_value();
    trap_handler::SetThreadInWasm();
    MaybeHandle<Object> maybe_retval =
        Execution::Call(isolate, wasm_entry, receiver, arraysize(args), args);
    TRACE("  => External wasm function returned%s\n",
          maybe_retval.is_null() ? " with exception" : "");

    if (maybe_retval.is_null()) {
      // JSEntryStub may throw a stack overflow before we actually get to wasm
      // code or back to the interpreter, meaning the thread-in-wasm flag won't
      // be cleared.
      if (trap_handler::IsThreadInWasm()) {
        trap_handler::ClearThreadInWasm();
      }
      return TryHandleException(isolate);
    }

    trap_handler::ClearThreadInWasm();

    // Pop arguments off the stack.
    sp_ -= num_args;
    // Push return values.
    if (sig->return_count() > 0) {
      // TODO(wasm): Handle multiple returns.
      DCHECK_EQ(1, sig->return_count());
      Address address = reinterpret_cast<Address>(arg_buffer.data());
      switch (sig->GetReturn()) {
        case kWasmI32:
          Push(WasmValue(ReadUnalignedValue<uint32_t>(address)));
          break;
        case kWasmI64:
          Push(WasmValue(ReadUnalignedValue<uint64_t>(address)));
          break;
        case kWasmF32:
          Push(WasmValue(ReadUnalignedValue<float>(address)));
          break;
        case kWasmF64:
          Push(WasmValue(ReadUnalignedValue<double>(address)));
          break;
        default:
          UNIMPLEMENTED();
      }
    }
    return {ExternalCallResult::EXTERNAL_RETURNED};
  }
2824 
  // Resolves the WasmCode object behind the call {target}. If {target}
  // points into the jump table, it is translated to the corresponding
  // function's code first; otherwise {target} must be the instruction start
  // of a code object.
  static WasmCode* GetTargetCode(WasmCodeManager* code_manager,
                                 Address target) {
    NativeModule* native_module = code_manager->LookupNativeModule(target);
    if (native_module->is_jump_table_slot(target)) {
      uint32_t func_index =
          native_module->GetFunctionIndexFromJumpTableSlot(target);
      return native_module->code(func_index);
    }
    WasmCode* code = native_module->Lookup(target);
    DCHECK_EQ(code->instruction_start(), target);
    return code;
  }
2837 
  // Calls the imported function at {function_index} via its
  // ImportedFunctionEntry. Imported functions always take the external call
  // path (CallExternalWasmFunction).
  ExternalCallResult CallImportedFunction(uint32_t function_index) {
    // Use a new HandleScope to avoid leaking / accumulating handles in the
    // outer scope.
    Isolate* isolate = instance_object_->GetIsolate();
    HandleScope handle_scope(isolate);

    DCHECK_GT(module()->num_imported_functions, function_index);
    Handle<WasmInstanceObject> instance;
    ImportedFunctionEntry entry(instance_object_, function_index);
    instance = handle(entry.instance(), isolate);
    WasmCode* code =
        GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());
    FunctionSig* sig = codemap()->module()->functions[function_index].sig;
    return CallExternalWasmFunction(isolate, instance, code, sig);
  }
2853 
  // Resolves and signature-checks an indirect call through table
  // {table_index} at {entry_index} against the expected signature
  // {sig_index}. Returns INTERNAL with the code to run in this interpreter,
  // dispatches external targets to CallExternalWasmFunction, or reports
  // INVALID_FUNC / SIGNATURE_MISMATCH.
  ExternalCallResult CallIndirectFunction(uint32_t table_index,
                                          uint32_t entry_index,
                                          uint32_t sig_index) {
    // Test-only mode: resolve the call through the WasmModule data instead
    // of the instance's dispatch table (see SetCallIndirectTestMode).
    if (codemap()->call_indirect_through_module()) {
      // Rely on the information stored in the WasmModule.
      InterpreterCode* code =
          codemap()->GetIndirectCode(table_index, entry_index);
      if (!code) return {ExternalCallResult::INVALID_FUNC};
      if (code->function->sig_index != sig_index) {
        // If not an exact match, we have to do a canonical check.
        int function_canonical_id =
            module()->signature_ids[code->function->sig_index];
        int expected_canonical_id = module()->signature_ids[sig_index];
        DCHECK_EQ(function_canonical_id,
                  module()->signature_map.Find(*code->function->sig));
        if (function_canonical_id != expected_canonical_id) {
          return {ExternalCallResult::SIGNATURE_MISMATCH};
        }
      }
      return {ExternalCallResult::INTERNAL, code};
    }

    Isolate* isolate = instance_object_->GetIsolate();
    uint32_t expected_sig_id = module()->signature_ids[sig_index];
    DCHECK_EQ(expected_sig_id,
              module()->signature_map.Find(*module()->signatures[sig_index]));

    // The function table is stored in the instance.
    // TODO(wasm): the wasm interpreter currently supports only one table.
    CHECK_EQ(0, table_index);
    // Bounds check against table size.
    if (entry_index >= instance_object_->indirect_function_table_size()) {
      return {ExternalCallResult::INVALID_FUNC};
    }

    IndirectFunctionTableEntry entry(instance_object_, entry_index);
    // Signature check.
    if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
      return {ExternalCallResult::SIGNATURE_MISMATCH};
    }

    Handle<WasmInstanceObject> instance = handle(entry.instance(), isolate);
    WasmCode* code =
        GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());

    // Call either an internal or external WASM function.
    HandleScope scope(isolate);
    FunctionSig* signature = module()->signatures[sig_index];

    if (code->kind() == WasmCode::kFunction) {
      if (!instance_object_.is_identical_to(instance)) {
        // Cross instance call.
        return CallExternalWasmFunction(isolate, instance, code, signature);
      }
      return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
    }

    // Call to external function.
    if (code->kind() == WasmCode::kInterpreterEntry ||
        code->kind() == WasmCode::kWasmToJsWrapper) {
      return CallExternalWasmFunction(isolate, instance, code, signature);
    }
    return {ExternalCallResult::INVALID_FUNC};
  }
2918 
current_activation()2919   inline Activation current_activation() {
2920     return activations_.empty() ? Activation(0, 0) : activations_.back();
2921   }
2922 };
2923 
// Accessor for a single interpreted frame, identified by its {index_} into
// the frame list of a {ThreadImpl}. Backs the public InterpretedFrame
// interface.
class InterpretedFrameImpl {
 public:
  InterpretedFrameImpl(ThreadImpl* thread, int index)
      : thread_(thread), index_(index) {
    DCHECK_LE(0, index);
  }

  // The wasm function executing in this frame.
  const WasmFunction* function() const { return frame()->code->function; }

  // The frame's current program counter, narrowed to int.
  int pc() const {
    DCHECK_LE(0, frame()->pc);
    DCHECK_GE(kMaxInt, frame()->pc);
    return static_cast<int>(frame()->pc);
  }

  // Number of parameters of the function executing in this frame.
  int GetParameterCount() const {
    DCHECK_GE(kMaxInt, function()->sig->parameter_count());
    return static_cast<int>(function()->sig->parameter_count());
  }

  // Number of parameters plus declared locals.
  int GetLocalCount() const {
    size_t num_locals = function()->sig->parameter_count() +
                        frame()->code->locals.type_list.size();
    DCHECK_GE(kMaxInt, num_locals);
    return static_cast<int>(num_locals);
  }

  // Number of operand-stack values of this frame, i.e. the frame's stack
  // slots excluding parameters and locals. For a non-top frame, the frame
  // ends where the next frame begins.
  int GetStackHeight() const {
    bool is_top_frame =
        static_cast<size_t>(index_) + 1 == thread_->frames_.size();
    size_t stack_limit =
        is_top_frame ? thread_->StackHeight() : thread_->frames_[index_ + 1].sp;
    DCHECK_LE(frame()->sp, stack_limit);
    size_t frame_size = stack_limit - frame()->sp;
    DCHECK_LE(GetLocalCount(), frame_size);
    return static_cast<int>(frame_size) - GetLocalCount();
  }

  // Value of the local (or parameter) at {index}; locals live at the bottom
  // of the frame, starting at frame()->sp.
  WasmValue GetLocalValue(int index) const {
    DCHECK_LE(0, index);
    DCHECK_GT(GetLocalCount(), index);
    return thread_->GetStackValue(static_cast<int>(frame()->sp) + index);
  }

  // Value of the operand-stack slot at {index}, counted above the locals.
  WasmValue GetStackValue(int index) const {
    DCHECK_LE(0, index);
    // Index must be within the number of stack values of this frame.
    DCHECK_GT(GetStackHeight(), index);
    return thread_->GetStackValue(static_cast<int>(frame()->sp) +
                                  GetLocalCount() + index);
  }

 private:
  ThreadImpl* thread_;
  int index_;

  // The underlying frame in the thread's frame list.
  ThreadImpl::Frame* frame() const {
    DCHECK_GT(thread_->frames_.size(), index_);
    return &thread_->frames_[index_];
  }
};
2985 
2986 // Converters between WasmInterpreter::Thread and WasmInterpreter::ThreadImpl.
2987 // Thread* is the public interface, without knowledge of the object layout.
2988 // This cast is potentially risky, but as long as we always cast it back before
2989 // accessing any data, it should be fine. UBSan is not complaining.
// Cast a ThreadImpl to its public WasmInterpreter::Thread facade.
WasmInterpreter::Thread* ToThread(ThreadImpl* impl) {
  return reinterpret_cast<WasmInterpreter::Thread*>(impl);
}
// Cast a public WasmInterpreter::Thread back to its implementation.
ThreadImpl* ToImpl(WasmInterpreter::Thread* thread) {
  return reinterpret_cast<ThreadImpl*>(thread);
}
2996 
2997 // Same conversion for InterpretedFrame and InterpretedFrameImpl.
// Cast an InterpretedFrameImpl to its public InterpretedFrame facade.
InterpretedFrame* ToFrame(InterpretedFrameImpl* impl) {
  return reinterpret_cast<InterpretedFrame*>(impl);
}
// Cast a public InterpretedFrame back to its implementation.
const InterpretedFrameImpl* ToImpl(const InterpretedFrame* frame) {
  return reinterpret_cast<const InterpretedFrameImpl*>(frame);
}
3004 
3005 }  // namespace
3006 
3007 //============================================================================
3008 // Implementation of the pimpl idiom for WasmInterpreter::Thread.
3009 // Instead of placing a pointer to the ThreadImpl inside of the Thread object,
3010 // we just reinterpret_cast them. ThreadImpls are only allocated inside this
3011 // translation unit anyway.
3012 //============================================================================
// All Thread methods simply forward to the corresponding ThreadImpl method.
WasmInterpreter::State WasmInterpreter::Thread::state() {
  return ToImpl(this)->state();
}
void WasmInterpreter::Thread::InitFrame(const WasmFunction* function,
                                        WasmValue* args) {
  ToImpl(this)->InitFrame(function, args);
}
WasmInterpreter::State WasmInterpreter::Thread::Run(int num_steps) {
  return ToImpl(this)->Run(num_steps);
}
void WasmInterpreter::Thread::Pause() { return ToImpl(this)->Pause(); }
void WasmInterpreter::Thread::Reset() { return ToImpl(this)->Reset(); }
WasmInterpreter::Thread::ExceptionHandlingResult
WasmInterpreter::Thread::HandleException(Isolate* isolate) {
  return ToImpl(this)->HandleException(isolate);
}
pc_t WasmInterpreter::Thread::GetBreakpointPc() {
  return ToImpl(this)->GetBreakpointPc();
}
int WasmInterpreter::Thread::GetFrameCount() {
  return ToImpl(this)->GetFrameCount();
}
// Allocates a new frame accessor; ownership is transferred to the returned
// FramePtr.
WasmInterpreter::FramePtr WasmInterpreter::Thread::GetFrame(int index) {
  DCHECK_LE(0, index);
  DCHECK_GT(GetFrameCount(), index);
  return FramePtr(ToFrame(new InterpretedFrameImpl(ToImpl(this), index)));
}
WasmValue WasmInterpreter::Thread::GetReturnValue(int index) {
  return ToImpl(this)->GetReturnValue(index);
}
TrapReason WasmInterpreter::Thread::GetTrapReason() {
  return ToImpl(this)->GetTrapReason();
}
bool WasmInterpreter::Thread::PossibleNondeterminism() {
  return ToImpl(this)->PossibleNondeterminism();
}
uint64_t WasmInterpreter::Thread::NumInterpretedCalls() {
  return ToImpl(this)->NumInterpretedCalls();
}
void WasmInterpreter::Thread::AddBreakFlags(uint8_t flags) {
  ToImpl(this)->AddBreakFlags(flags);
}
void WasmInterpreter::Thread::ClearBreakFlags() {
  ToImpl(this)->ClearBreakFlags();
}
uint32_t WasmInterpreter::Thread::NumActivations() {
  return ToImpl(this)->NumActivations();
}
uint32_t WasmInterpreter::Thread::StartActivation() {
  return ToImpl(this)->StartActivation();
}
void WasmInterpreter::Thread::FinishActivation(uint32_t id) {
  ToImpl(this)->FinishActivation(id);
}
uint32_t WasmInterpreter::Thread::ActivationFrameBase(uint32_t id) {
  return ToImpl(this)->ActivationFrameBase(id);
}
3070 
3071 //============================================================================
3072 // The implementation details of the interpreter.
3073 //============================================================================
// Zone-allocated container holding all state of one WasmInterpreter: a copy
// of the module wire bytes, the code map, and the interpreter threads.
class WasmInterpreterInternals : public ZoneObject {
 public:
  // Create a copy of the module bytes for the interpreter, since the passed
  // pointer might be invalidated after constructing the interpreter.
  const ZoneVector<uint8_t> module_bytes_;
  CodeMap codemap_;
  ZoneVector<ThreadImpl> threads_;

  WasmInterpreterInternals(Zone* zone, const WasmModule* module,
                           const ModuleWireBytes& wire_bytes,
                           Handle<WasmInstanceObject> instance_object)
      : module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
        codemap_(module, module_bytes_.data(), zone),
        threads_(zone) {
    // Only a single interpreter thread is created (see GetThreadCount).
    threads_.emplace_back(zone, &codemap_, instance_object);
  }
};
3091 
namespace {
// TODO(wasm): a finalizer is only required to delete the global handle.
// Weak callback that destroys the global handle created in MakeWeak.
void GlobalHandleDeleter(const v8::WeakCallbackInfo<void>& data) {
  GlobalHandles::Destroy(reinterpret_cast<Object**>(
      reinterpret_cast<JSObject**>(data.GetParameter())));
}

// Wraps {instance_object} in a weak global handle; the finalizer callback
// destroys the global handle again once it fires.
Handle<WasmInstanceObject> MakeWeak(
    Isolate* isolate, Handle<WasmInstanceObject> instance_object) {
  Handle<Object> handle = isolate->global_handles()->Create(*instance_object);
  // TODO(wasm): use a phantom handle in the WasmInterpreter.
  GlobalHandles::MakeWeak(handle.location(), handle.location(),
                          &GlobalHandleDeleter,
                          v8::WeakCallbackType::kFinalizer);
  return Handle<WasmInstanceObject>::cast(handle);
}
}  // namespace
3109 
3110 //============================================================================
3111 // Implementation of the public interface of the interpreter.
3112 //============================================================================
// Constructs the interpreter for {module}. The internals are allocated in
// the interpreter-owned zone; the instance object is stored through a weak
// global handle (via MakeWeak).
WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
                                 const ModuleWireBytes& wire_bytes,
                                 Handle<WasmInstanceObject> instance_object)
    : zone_(isolate->allocator(), ZONE_NAME),
      internals_(new (&zone_) WasmInterpreterInternals(
          &zone_, module, wire_bytes, MakeWeak(isolate, instance_object))) {}
3119 
// The internals are zone-allocated (see the constructor), so only the
// destructor is invoked explicitly; the zone reclaims the memory.
WasmInterpreter::~WasmInterpreter() { internals_->~WasmInterpreterInternals(); }

void WasmInterpreter::Run() { internals_->threads_[0].Run(); }

void WasmInterpreter::Pause() { internals_->threads_[0].Pause(); }
3125 
SetBreakpoint(const WasmFunction * function,pc_t pc,bool enabled)3126 bool WasmInterpreter::SetBreakpoint(const WasmFunction* function, pc_t pc,
3127                                     bool enabled) {
3128   InterpreterCode* code = internals_->codemap_.GetCode(function);
3129   size_t size = static_cast<size_t>(code->end - code->start);
3130   // Check bounds for {pc}.
3131   if (pc < code->locals.encoded_size || pc >= size) return false;
3132   // Make a copy of the code before enabling a breakpoint.
3133   if (enabled && code->orig_start == code->start) {
3134     code->start = reinterpret_cast<byte*>(zone_.New(size));
3135     memcpy(code->start, code->orig_start, size);
3136     code->end = code->start + size;
3137   }
3138   bool prev = code->start[pc] == kInternalBreakpoint;
3139   if (enabled) {
3140     code->start[pc] = kInternalBreakpoint;
3141   } else {
3142     code->start[pc] = code->orig_start[pc];
3143   }
3144   return prev;
3145 }
3146 
GetBreakpoint(const WasmFunction * function,pc_t pc)3147 bool WasmInterpreter::GetBreakpoint(const WasmFunction* function, pc_t pc) {
3148   InterpreterCode* code = internals_->codemap_.GetCode(function);
3149   size_t size = static_cast<size_t>(code->end - code->start);
3150   // Check bounds for {pc}.
3151   if (pc < code->locals.encoded_size || pc >= size) return false;
3152   // Check if a breakpoint is present at that place in the code.
3153   return code->start[pc] == kInternalBreakpoint;
3154 }
3155 
// Tracing of individual functions is not implemented yet.
bool WasmInterpreter::SetTracing(const WasmFunction* function, bool enabled) {
  UNIMPLEMENTED();
  return false;
}

int WasmInterpreter::GetThreadCount() {
  return 1;  // only one thread for now.
}

WasmInterpreter::Thread* WasmInterpreter::GetThread(int id) {
  CHECK_EQ(0, id);  // only one thread for now.
  return ToThread(&internals_->threads_[id]);
}

// Registers {function} in the code map without an associated code body.
void WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
  internals_->codemap_.AddFunction(function, nullptr, nullptr);
}

// Sets the code body [start, end) for an already registered {function}.
void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
                                                const byte* start,
                                                const byte* end) {
  internals_->codemap_.SetFunctionCode(function, start, end);
}

// Makes call_indirect resolve through the WasmModule data instead of the
// instance's dispatch table (see CallIndirectFunction).
void WasmInterpreter::SetCallIndirectTestMode() {
  internals_->codemap_.set_call_indirect_through_module(true);
}
3183 
// Computes the control-transfer side table for the function body
// [start, end); exposed for testing only.
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
    Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
  // Create some dummy structures, to avoid special-casing the implementation
  // just for testing.
  FunctionSig sig(0, 0, nullptr);
  WasmFunction function{&sig, 0, 0, {0, 0}, false, false};
  InterpreterCode code{
      &function, BodyLocalDecls(zone), start, end, nullptr, nullptr, nullptr};

  // Now compute and return the control transfers.
  SideTable side_table(zone, module, &code);
  return side_table.map_;
}
3197 
3198 //============================================================================
3199 // Implementation of the frame inspection interface.
3200 //============================================================================
// All InterpretedFrame methods simply forward to InterpretedFrameImpl.
const WasmFunction* InterpretedFrame::function() const {
  return ToImpl(this)->function();
}
int InterpretedFrame::pc() const { return ToImpl(this)->pc(); }
int InterpretedFrame::GetParameterCount() const {
  return ToImpl(this)->GetParameterCount();
}
int InterpretedFrame::GetLocalCount() const {
  return ToImpl(this)->GetLocalCount();
}
int InterpretedFrame::GetStackHeight() const {
  return ToImpl(this)->GetStackHeight();
}
WasmValue InterpretedFrame::GetLocalValue(int index) const {
  return ToImpl(this)->GetLocalValue(index);
}
WasmValue InterpretedFrame::GetStackValue(int index) const {
  return ToImpl(this)->GetStackValue(index);
}
// Frames are allocated as InterpretedFrameImpl (see Thread::GetFrame), so
// they must be deleted through that type.
void InterpretedFrameDeleter::operator()(InterpretedFrame* ptr) {
  delete ToImpl(ptr);
}
3223 
3224 #undef TRACE
3225 #undef LANE
3226 #undef FOREACH_INTERNAL_OPCODE
3227 #undef WASM_CTYPES
3228 #undef FOREACH_SIMPLE_BINOP
3229 #undef FOREACH_OTHER_BINOP
3230 #undef FOREACH_I32CONV_FLOATOP
3231 #undef FOREACH_OTHER_UNOP
3232 
3233 }  // namespace wasm
3234 }  // namespace internal
3235 }  // namespace v8
3236