• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <type_traits>
6 
7 #include "src/wasm/wasm-interpreter.h"
8 
9 #include "src/conversions.h"
10 #include "src/objects-inl.h"
11 #include "src/utils.h"
12 #include "src/wasm/decoder.h"
13 #include "src/wasm/function-body-decoder-impl.h"
14 #include "src/wasm/function-body-decoder.h"
15 #include "src/wasm/wasm-external-refs.h"
16 #include "src/wasm/wasm-limits.h"
17 #include "src/wasm/wasm-module.h"
18 
19 #include "src/zone/accounting-allocator.h"
20 #include "src/zone/zone-containers.h"
21 
22 namespace v8 {
23 namespace internal {
24 namespace wasm {
25 
26 #if DEBUG
27 #define TRACE(...)                                        \
28   do {                                                    \
29     if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
30   } while (false)
31 #else
32 #define TRACE(...)
33 #endif
34 
35 #define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF)
36 
37 #define FOREACH_SIMPLE_BINOP(V) \
38   V(I32Add, uint32_t, +)        \
39   V(I32Sub, uint32_t, -)        \
40   V(I32Mul, uint32_t, *)        \
41   V(I32And, uint32_t, &)        \
42   V(I32Ior, uint32_t, |)        \
43   V(I32Xor, uint32_t, ^)        \
44   V(I32Eq, uint32_t, ==)        \
45   V(I32Ne, uint32_t, !=)        \
46   V(I32LtU, uint32_t, <)        \
47   V(I32LeU, uint32_t, <=)       \
48   V(I32GtU, uint32_t, >)        \
49   V(I32GeU, uint32_t, >=)       \
50   V(I32LtS, int32_t, <)         \
51   V(I32LeS, int32_t, <=)        \
52   V(I32GtS, int32_t, >)         \
53   V(I32GeS, int32_t, >=)        \
54   V(I64Add, uint64_t, +)        \
55   V(I64Sub, uint64_t, -)        \
56   V(I64Mul, uint64_t, *)        \
57   V(I64And, uint64_t, &)        \
58   V(I64Ior, uint64_t, |)        \
59   V(I64Xor, uint64_t, ^)        \
60   V(I64Eq, uint64_t, ==)        \
61   V(I64Ne, uint64_t, !=)        \
62   V(I64LtU, uint64_t, <)        \
63   V(I64LeU, uint64_t, <=)       \
64   V(I64GtU, uint64_t, >)        \
65   V(I64GeU, uint64_t, >=)       \
66   V(I64LtS, int64_t, <)         \
67   V(I64LeS, int64_t, <=)        \
68   V(I64GtS, int64_t, >)         \
69   V(I64GeS, int64_t, >=)        \
70   V(F32Add, float, +)           \
71   V(F32Sub, float, -)           \
72   V(F32Eq, float, ==)           \
73   V(F32Ne, float, !=)           \
74   V(F32Lt, float, <)            \
75   V(F32Le, float, <=)           \
76   V(F32Gt, float, >)            \
77   V(F32Ge, float, >=)           \
78   V(F64Add, double, +)          \
79   V(F64Sub, double, -)          \
80   V(F64Eq, double, ==)          \
81   V(F64Ne, double, !=)          \
82   V(F64Lt, double, <)           \
83   V(F64Le, double, <=)          \
84   V(F64Gt, double, >)           \
85   V(F64Ge, double, >=)          \
86   V(F32Mul, float, *)           \
87   V(F64Mul, double, *)          \
88   V(F32Div, float, /)           \
89   V(F64Div, double, /)
90 
91 #define FOREACH_OTHER_BINOP(V) \
92   V(I32DivS, int32_t)          \
93   V(I32DivU, uint32_t)         \
94   V(I32RemS, int32_t)          \
95   V(I32RemU, uint32_t)         \
96   V(I32Shl, uint32_t)          \
97   V(I32ShrU, uint32_t)         \
98   V(I32ShrS, int32_t)          \
99   V(I64DivS, int64_t)          \
100   V(I64DivU, uint64_t)         \
101   V(I64RemS, int64_t)          \
102   V(I64RemU, uint64_t)         \
103   V(I64Shl, uint64_t)          \
104   V(I64ShrU, uint64_t)         \
105   V(I64ShrS, int64_t)          \
106   V(I32Ror, int32_t)           \
107   V(I32Rol, int32_t)           \
108   V(I64Ror, int64_t)           \
109   V(I64Rol, int64_t)           \
110   V(F32Min, float)             \
111   V(F32Max, float)             \
112   V(F64Min, double)            \
113   V(F64Max, double)            \
114   V(I32AsmjsDivS, int32_t)     \
115   V(I32AsmjsDivU, uint32_t)    \
116   V(I32AsmjsRemS, int32_t)     \
117   V(I32AsmjsRemU, uint32_t)
118 
119 #define FOREACH_OTHER_UNOP(V)    \
120   V(I32Clz, uint32_t)            \
121   V(I32Ctz, uint32_t)            \
122   V(I32Popcnt, uint32_t)         \
123   V(I32Eqz, uint32_t)            \
124   V(I64Clz, uint64_t)            \
125   V(I64Ctz, uint64_t)            \
126   V(I64Popcnt, uint64_t)         \
127   V(I64Eqz, uint64_t)            \
128   V(F32Abs, float)               \
129   V(F32Neg, float)               \
130   V(F32Ceil, float)              \
131   V(F32Floor, float)             \
132   V(F32Trunc, float)             \
133   V(F32NearestInt, float)        \
134   V(F64Abs, double)              \
135   V(F64Neg, double)              \
136   V(F64Ceil, double)             \
137   V(F64Floor, double)            \
138   V(F64Trunc, double)            \
139   V(F64NearestInt, double)       \
140   V(I32SConvertF32, float)       \
141   V(I32SConvertF64, double)      \
142   V(I32UConvertF32, float)       \
143   V(I32UConvertF64, double)      \
144   V(I32ConvertI64, int64_t)      \
145   V(I64SConvertF32, float)       \
146   V(I64SConvertF64, double)      \
147   V(I64UConvertF32, float)       \
148   V(I64UConvertF64, double)      \
149   V(I64SConvertI32, int32_t)     \
150   V(I64UConvertI32, uint32_t)    \
151   V(F32SConvertI32, int32_t)     \
152   V(F32UConvertI32, uint32_t)    \
153   V(F32SConvertI64, int64_t)     \
154   V(F32UConvertI64, uint64_t)    \
155   V(F32ConvertF64, double)       \
156   V(F32ReinterpretI32, int32_t)  \
157   V(F64SConvertI32, int32_t)     \
158   V(F64UConvertI32, uint32_t)    \
159   V(F64SConvertI64, int64_t)     \
160   V(F64UConvertI64, uint64_t)    \
161   V(F64ConvertF32, float)        \
162   V(F64ReinterpretI64, int64_t)  \
163   V(I32AsmjsSConvertF32, float)  \
164   V(I32AsmjsUConvertF32, float)  \
165   V(I32AsmjsSConvertF64, double) \
166   V(I32AsmjsUConvertF64, double) \
167   V(F32Sqrt, float)              \
168   V(F64Sqrt, double)
169 
ExecuteI32DivS(int32_t a,int32_t b,TrapReason * trap)170 static inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
171   if (b == 0) {
172     *trap = kTrapDivByZero;
173     return 0;
174   }
175   if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
176     *trap = kTrapDivUnrepresentable;
177     return 0;
178   }
179   return a / b;
180 }
181 
ExecuteI32DivU(uint32_t a,uint32_t b,TrapReason * trap)182 static inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b,
183                                       TrapReason* trap) {
184   if (b == 0) {
185     *trap = kTrapDivByZero;
186     return 0;
187   }
188   return a / b;
189 }
190 
ExecuteI32RemS(int32_t a,int32_t b,TrapReason * trap)191 static inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
192   if (b == 0) {
193     *trap = kTrapRemByZero;
194     return 0;
195   }
196   if (b == -1) return 0;
197   return a % b;
198 }
199 
ExecuteI32RemU(uint32_t a,uint32_t b,TrapReason * trap)200 static inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b,
201                                       TrapReason* trap) {
202   if (b == 0) {
203     *trap = kTrapRemByZero;
204     return 0;
205   }
206   return a % b;
207 }
208 
// Wasm i32.shl: the shift count is taken modulo 32, so the shift below is
// always in range and never undefined.
static inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
  return a << (b & 0x1f);
}
212 
// Wasm i32.shr_u: logical (zero-filling) right shift; count taken modulo 32.
static inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b,
                                      TrapReason* trap) {
  return a >> (b & 0x1f);
}
217 
// Wasm i32.shr_s: arithmetic right shift; count taken modulo 32.
// NOTE(review): >> on a negative int32_t is implementation-defined pre-C++20;
// this relies on all supported compilers implementing it as an arithmetic
// (sign-extending) shift.
static inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
  return a >> (b & 0x1f);
}
221 
ExecuteI64DivS(int64_t a,int64_t b,TrapReason * trap)222 static inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
223   if (b == 0) {
224     *trap = kTrapDivByZero;
225     return 0;
226   }
227   if (b == -1 && a == std::numeric_limits<int64_t>::min()) {
228     *trap = kTrapDivUnrepresentable;
229     return 0;
230   }
231   return a / b;
232 }
233 
ExecuteI64DivU(uint64_t a,uint64_t b,TrapReason * trap)234 static inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b,
235                                       TrapReason* trap) {
236   if (b == 0) {
237     *trap = kTrapDivByZero;
238     return 0;
239   }
240   return a / b;
241 }
242 
ExecuteI64RemS(int64_t a,int64_t b,TrapReason * trap)243 static inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
244   if (b == 0) {
245     *trap = kTrapRemByZero;
246     return 0;
247   }
248   if (b == -1) return 0;
249   return a % b;
250 }
251 
ExecuteI64RemU(uint64_t a,uint64_t b,TrapReason * trap)252 static inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b,
253                                       TrapReason* trap) {
254   if (b == 0) {
255     *trap = kTrapRemByZero;
256     return 0;
257   }
258   return a % b;
259 }
260 
// Wasm i64.shl: the shift count is taken modulo 64, so the shift below is
// always in range and never undefined.
static inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
  return a << (b & 0x3f);
}
264 
// Wasm i64.shr_u: logical (zero-filling) right shift; count taken modulo 64.
static inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b,
                                      TrapReason* trap) {
  return a >> (b & 0x3f);
}
269 
// Wasm i64.shr_s: arithmetic right shift; count taken modulo 64.
// NOTE(review): >> on a negative int64_t is implementation-defined pre-C++20;
// this relies on all supported compilers implementing it as an arithmetic
// (sign-extending) shift.
static inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
  return a >> (b & 0x3f);
}
273 
// Wasm i32.rotr: rotate right by b (mod 32) bits.
// Fix: the complement shift is masked as well, so a rotation count that is a
// multiple of 32 no longer evaluates 'a << 32', which is undefined behavior
// in C++ (shift count equal to the bit width).
static inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x1f);
  return (a >> shift) | (a << ((32 - shift) & 0x1f));
}
278 
// Wasm i32.rotl: rotate left by b (mod 32) bits.
// Fix: the complement shift is masked as well, so a rotation count that is a
// multiple of 32 no longer evaluates 'a >> 32', which is undefined behavior
// in C++ (shift count equal to the bit width).
static inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x1f);
  return (a << shift) | (a >> ((32 - shift) & 0x1f));
}
283 
// Wasm i64.rotr: rotate right by b (mod 64) bits.
// Fix: the complement shift is masked as well, so a rotation count that is a
// multiple of 64 no longer evaluates 'a << 64', which is undefined behavior
// in C++ (shift count equal to the bit width).
static inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x3f);
  return (a >> shift) | (a << ((64 - shift) & 0x3f));
}
288 
// Wasm i64.rotl: rotate left by b (mod 64) bits.
// Fix: the complement shift is masked as well, so a rotation count that is a
// multiple of 64 no longer evaluates 'a >> 64', which is undefined behavior
// in C++ (shift count equal to the bit width).
static inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x3f);
  return (a << shift) | (a >> ((64 - shift) & 0x3f));
}
293 
// Wasm f32.min; delegates to JSMin. NOTE(review): presumed NaN-propagating
// and -0-before-+0, matching wasm min semantics — confirm in JSMin.
static inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
  return JSMin(a, b);
}
297 
// Wasm f32.max; delegates to JSMax (see the NaN/-0 note on ExecuteF32Min).
static inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
  return JSMax(a, b);
}
301 
// Wasm f32.copysign: magnitude of a with the sign of b; copysignf handles
// NaN and signed zero.
static inline float ExecuteF32CopySign(float a, float b, TrapReason* trap) {
  return copysignf(a, b);
}
305 
// Wasm f64.min; delegates to JSMin (see the NaN/-0 note on ExecuteF32Min).
static inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
  return JSMin(a, b);
}
309 
// Wasm f64.max; delegates to JSMax (see the NaN/-0 note on ExecuteF32Min).
static inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
  return JSMax(a, b);
}
313 
// Wasm f64.copysign: magnitude of a with the sign of b; copysign handles
// NaN and signed zero.
static inline double ExecuteF64CopySign(double a, double b, TrapReason* trap) {
  return copysign(a, b);
}
317 
// asm.js signed division never traps: x / 0 yields 0 and INT32_MIN / -1
// wraps back to INT32_MIN.
static inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b,
                                          TrapReason* trap) {
  if (b == 0) return 0;
  if (a == std::numeric_limits<int32_t>::min() && b == -1) {
    return std::numeric_limits<int32_t>::min();
  }
  return a / b;
}
326 
// asm.js unsigned division never traps: x / 0 yields 0.
static inline uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b,
                                           TrapReason* trap) {
  return (b == 0) ? 0 : a / b;
}
332 
// asm.js signed remainder never traps: x % 0 and x % -1 both yield 0 (the
// latter also avoids the INT32_MIN % -1 overflow).
static inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b,
                                          TrapReason* trap) {
  if (b == 0 || b == -1) return 0;
  return a % b;
}
339 
// asm.js unsigned remainder never traps: x % 0 yields 0.
static inline uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b,
                                           TrapReason* trap) {
  return (b == 0) ? 0 : a % b;
}
345 
// asm.js f32 -> i32 (signed): JavaScript ToInt32 semantics via DoubleToInt32,
// so out-of-range values wrap modulo 2^32 instead of trapping.
static inline int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) {
  return DoubleToInt32(a);
}
349 
// asm.js f32 -> u32: JavaScript ToUint32 semantics via DoubleToUint32;
// never traps.
static inline uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) {
  return DoubleToUint32(a);
}
353 
// asm.js f64 -> i32 (signed): JavaScript ToInt32 semantics; never traps.
static inline int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) {
  return DoubleToInt32(a);
}
357 
// asm.js f64 -> u32: JavaScript ToUint32 semantics; never traps.
static inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
  return DoubleToUint32(a);
}
361 
// Wasm i32.clz: count of leading zero bits (the helper defines clz(0) == 32).
static int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
  return base::bits::CountLeadingZeros32(val);
}
365 
// Wasm i32.ctz: count of trailing zero bits (ctz(0) == 32).
static uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
  return base::bits::CountTrailingZeros32(val);
}
369 
// Wasm i32.popcnt: number of set bits, computed via the external-refs
// wrapper shared with generated code.
static uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
  return word32_popcnt_wrapper(&val);
}
373 
// Wasm i32.eqz: 1 if val is zero, 0 otherwise.
static inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
  return val == 0 ? 1 : 0;
}
377 
// Wasm i64.clz: count of leading zero bits (clz(0) == 64).
static int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
  return base::bits::CountLeadingZeros64(val);
}
381 
// Wasm i64.ctz: count of trailing zero bits (ctz(0) == 64).
static inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
  return base::bits::CountTrailingZeros64(val);
}
385 
// Wasm i64.popcnt: number of set bits, via the shared external-refs wrapper.
static inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
  return word64_popcnt_wrapper(&val);
}
389 
// Wasm i64.eqz: i32 result, 1 if val is zero, 0 otherwise.
static inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
  return val == 0 ? 1 : 0;
}
393 
// Wasm f32.abs: clears the sign bit directly, preserving NaN payloads
// (a library fabsf call could canonicalize NaNs on some platforms).
static inline float ExecuteF32Abs(float a, TrapReason* trap) {
  return bit_cast<float>(bit_cast<uint32_t>(a) & 0x7fffffff);
}
397 
// Wasm f32.neg: flips the sign bit directly, which is correct for NaN and
// signed zero (unlike computing 0 - a).
static inline float ExecuteF32Neg(float a, TrapReason* trap) {
  return bit_cast<float>(bit_cast<uint32_t>(a) ^ 0x80000000);
}
401 
// Wasm f32.ceil: round up to the nearest integral value.
static inline float ExecuteF32Ceil(float a, TrapReason* trap) {
  return ceilf(a);
}
405 
// Wasm f32.floor: round down to the nearest integral value.
static inline float ExecuteF32Floor(float a, TrapReason* trap) {
  return floorf(a);
}
409 
// Wasm f32.trunc: round toward zero to the nearest integral value.
static inline float ExecuteF32Trunc(float a, TrapReason* trap) {
  return truncf(a);
}
413 
// Wasm f32.nearest: round to the nearest integral value, ties to even
// (nearbyintf under the default rounding mode).
static inline float ExecuteF32NearestInt(float a, TrapReason* trap) {
  return nearbyintf(a);
}
417 
// Wasm f32.sqrt; sqrtf returns NaN for negative inputs.
static inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
  return sqrtf(a);
}
422 
// Wasm f64.abs: clears the sign bit directly, preserving NaN payloads.
static inline double ExecuteF64Abs(double a, TrapReason* trap) {
  return bit_cast<double>(bit_cast<uint64_t>(a) & 0x7fffffffffffffff);
}
426 
// Wasm f64.neg: flips the sign bit directly, correct for NaN and signed zero.
static inline double ExecuteF64Neg(double a, TrapReason* trap) {
  return bit_cast<double>(bit_cast<uint64_t>(a) ^ 0x8000000000000000);
}
430 
// Wasm f64.ceil: round up to the nearest integral value.
static inline double ExecuteF64Ceil(double a, TrapReason* trap) {
  return ceil(a);
}
434 
// Wasm f64.floor: round down to the nearest integral value.
static inline double ExecuteF64Floor(double a, TrapReason* trap) {
  return floor(a);
}
438 
// Wasm f64.trunc: round toward zero to the nearest integral value.
static inline double ExecuteF64Trunc(double a, TrapReason* trap) {
  return trunc(a);
}
442 
// Wasm f64.nearest: round to the nearest integral value, ties to even
// (nearbyint under the default rounding mode).
static inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
  return nearbyint(a);
}
446 
// Wasm f64.sqrt; sqrt returns NaN for negative inputs.
static inline double ExecuteF64Sqrt(double a, TrapReason* trap) {
  return sqrt(a);
}
450 
// Wasm i32.trunc_s/f32: truncating conversion; traps on NaN and on values
// outside the int32 range. Note the asymmetric comparisons: the upper bound
// is itself unrepresentable as int32 (strict <), while the lower bound is
// exactly INT32_MIN, which is representable (inclusive >=). NaN fails both
// comparisons and traps.
static int32_t ExecuteI32SConvertF32(float a, TrapReason* trap) {
  // The upper bound is (INT32_MAX + 1), which is the lowest float-representable
  // number above INT32_MAX which cannot be represented as int32.
  float upper_bound = 2147483648.0f;
  // We use INT32_MIN as a lower bound because (INT32_MIN - 1) is not
  // representable as float, and no number between (INT32_MIN - 1) and INT32_MIN
  // is.
  float lower_bound = static_cast<float>(INT32_MIN);
  if (a < upper_bound && a >= lower_bound) {
    return static_cast<int32_t>(a);
  }
  *trap = kTrapFloatUnrepresentable;
  return 0;
}
465 
// Wasm i32.trunc_s/f64: truncating conversion; traps on NaN and on values
// outside the int32 range. Both bounds are unrepresentable as int32, so both
// comparisons are strict. NaN fails both comparisons and traps.
static int32_t ExecuteI32SConvertF64(double a, TrapReason* trap) {
  // The upper bound is (INT32_MAX + 1), which is the lowest double-
  // representable number above INT32_MAX which cannot be represented as int32.
  double upper_bound = 2147483648.0;
  // The lower bound is (INT32_MIN - 1), which is the greatest double-
  // representable number below INT32_MIN which cannot be represented as int32.
  double lower_bound = -2147483649.0;
  if (a < upper_bound && a > lower_bound) {
    return static_cast<int32_t>(a);
  }
  *trap = kTrapFloatUnrepresentable;
  return 0;
}
479 
ExecuteI32UConvertF32(float a,TrapReason * trap)480 static uint32_t ExecuteI32UConvertF32(float a, TrapReason* trap) {
481   // The upper bound is (UINT32_MAX + 1), which is the lowest
482   // float-representable number above UINT32_MAX which cannot be represented as
483   // uint32.
484   double upper_bound = 4294967296.0f;
485   double lower_bound = -1.0f;
486   if (a < upper_bound && a > lower_bound) {
487     return static_cast<uint32_t>(a);
488   }
489   *trap = kTrapFloatUnrepresentable;
490   return 0;
491 }
492 
// Wasm i32.trunc_u/f64: truncating conversion; traps on NaN and on values
// outside [0, UINT32_MAX]. The lower bound is -1.0 because any value in
// (-1, 0) truncates to 0, which is valid. NaN fails both comparisons and
// traps.
static uint32_t ExecuteI32UConvertF64(double a, TrapReason* trap) {
  // The upper bound is (UINT32_MAX + 1), which is the lowest
  // double-representable number above UINT32_MAX which cannot be represented as
  // uint32.
  double upper_bound = 4294967296.0;
  double lower_bound = -1.0;
  if (a < upper_bound && a > lower_bound) {
    return static_cast<uint32_t>(a);
  }
  *trap = kTrapFloatUnrepresentable;
  return 0;
}
505 
// Wasm i32.wrap/i64: keep the low 32 bits. The unsigned cast is already a
// reduction modulo 2^32, so no explicit mask is required.
static inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
  return static_cast<uint32_t>(a);
}
509 
ExecuteI64SConvertF32(float a,TrapReason * trap)510 static int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
511   int64_t output;
512   if (!float32_to_int64_wrapper(&a, &output)) {
513     *trap = kTrapFloatUnrepresentable;
514   }
515   return output;
516 }
517 
ExecuteI64SConvertF64(double a,TrapReason * trap)518 static int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
519   int64_t output;
520   if (!float64_to_int64_wrapper(&a, &output)) {
521     *trap = kTrapFloatUnrepresentable;
522   }
523   return output;
524 }
525 
ExecuteI64UConvertF32(float a,TrapReason * trap)526 static uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
527   uint64_t output;
528   if (!float32_to_uint64_wrapper(&a, &output)) {
529     *trap = kTrapFloatUnrepresentable;
530   }
531   return output;
532 }
533 
ExecuteI64UConvertF64(double a,TrapReason * trap)534 static uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
535   uint64_t output;
536   if (!float64_to_uint64_wrapper(&a, &output)) {
537     *trap = kTrapFloatUnrepresentable;
538   }
539   return output;
540 }
541 
// Wasm i64.extend_s/i32: sign-extend a 32-bit value to 64 bits.
static inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<int64_t>(a);
}
545 
// Wasm i64.extend_u/i32: zero-extend a 32-bit value to 64 bits.
// NOTE(review): the return type is int64_t while the cast yields uint64_t;
// the bit pattern is identical, but consider aligning the declared type.
static inline int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<uint64_t>(a);
}
549 
// Wasm f32.convert_s/i32: signed int to float, rounding to nearest.
static inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}
553 
// Wasm f32.convert_u/i32: unsigned int to float, rounding to nearest.
static inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}
557 
// Wasm f32.convert_s/i64 via the external-refs wrapper (kept out of line so
// the interpreter matches the rounding of generated code).
static inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
  float output;
  int64_to_float32_wrapper(&a, &output);
  return output;
}
563 
// Wasm f32.convert_u/i64 via the external-refs wrapper.
static inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
  float output;
  uint64_to_float32_wrapper(&a, &output);
  return output;
}
569 
// Wasm f32.demote/f64: narrow to single precision, rounding to nearest.
static inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
  return static_cast<float>(a);
}
573 
// Wasm f32.reinterpret/i32: reuse the 32-bit pattern unchanged.
static inline float ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
  return bit_cast<float>(a);
}
577 
// Wasm f64.convert_s/i32: every int32 is exactly representable as double.
static inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}
581 
// Wasm f64.convert_u/i32: every uint32 is exactly representable as double.
static inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}
585 
// Wasm f64.convert_s/i64 via the external-refs wrapper.
static inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
  double output;
  int64_to_float64_wrapper(&a, &output);
  return output;
}
591 
// Wasm f64.convert_u/i64 via the external-refs wrapper.
static inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
  double output;
  uint64_to_float64_wrapper(&a, &output);
  return output;
}
597 
// Wasm f64.promote/f32: widening is exact for every float value.
static inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
  return static_cast<double>(a);
}
601 
// Wasm f64.reinterpret/i64: reuse the 64-bit pattern unchanged.
static inline double ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
  return bit_cast<double>(a);
}
605 
// Wasm i32.reinterpret/f32: read the raw bits of the value slot. The
// unchecked accessor is required because the slot is tagged as f32.
static inline int32_t ExecuteI32ReinterpretF32(WasmVal a) {
  return a.to_unchecked<int32_t>();
}
609 
// Wasm i64.reinterpret/f64: read the raw bits of the value slot. The
// unchecked accessor is required because the slot is tagged as f64.
static inline int64_t ExecuteI64ReinterpretF64(WasmVal a) {
  return a.to_unchecked<int64_t>();
}
613 
// Grows linear memory by {delta_pages} wasm pages. Returns the previous size
// in pages on success, or -1 on failure (page limit exceeded or allocation
// failure). Memory is malloc-backed here; newly added bytes are always
// zero-initialized.
static inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
                                        WasmInstance* instance) {
  // TODO(ahaas): Move memory allocation to wasm-module.cc for better
  // encapsulation.
  // Reject deltas beyond the engine or module page limits up front.
  // NOTE(review): the byte computations below assume the page limits keep
  // total sizes within uint32 range — confirm against the configured maxima.
  if (delta_pages > FLAG_wasm_max_mem_pages ||
      delta_pages > instance->module->max_mem_pages) {
    return -1;
  }
  uint32_t old_size = instance->mem_size;
  uint32_t new_size;
  byte* new_mem_start;
  if (instance->mem_size == 0) {
    // First allocation: calloc yields already-zeroed memory.
    // TODO(gdeepti): Fix bounds check to take into account size of memtype.
    new_size = delta_pages * wasm::WasmModule::kPageSize;
    new_mem_start = static_cast<byte*>(calloc(new_size, sizeof(byte)));
    if (!new_mem_start) {
      return -1;
    }
  } else {
    DCHECK_NOT_NULL(instance->mem_start);
    new_size = old_size + delta_pages * wasm::WasmModule::kPageSize;
    // Re-check the combined size in pages: old_size + delta may exceed the
    // limits even when delta alone passed the check above.
    if (new_size / wasm::WasmModule::kPageSize > FLAG_wasm_max_mem_pages ||
        new_size / wasm::WasmModule::kPageSize >
            instance->module->max_mem_pages) {
      return -1;
    }
    new_mem_start = static_cast<byte*>(realloc(instance->mem_start, new_size));
    if (!new_mem_start) {
      return -1;
    }
    // Zero initializing uninitialized memory from realloc
    memset(new_mem_start + old_size, 0, new_size - old_size);
  }
  instance->mem_start = new_mem_start;
  instance->mem_size = new_size;
  return static_cast<int32_t>(old_size / WasmModule::kPageSize);
}
651 
// Opcodes used only by the interpreter, placed in encoding space that real
// wasm opcodes do not occupy (currently just the 0xFF breakpoint marker that
// is patched over an instruction).
enum InternalOpcode {
#define DECL_INTERNAL_ENUM(name, value) kInternal##name = value,
  FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_ENUM)
#undef DECL_INTERNAL_ENUM
};
657 
// Returns a printable name for {val}: interpreter-internal opcodes are
// resolved first, everything else falls through to the regular wasm opcode
// table.
static const char* OpcodeName(uint32_t val) {
  switch (val) {
#define DECL_INTERNAL_CASE(name, value) \
  case kInternal##name:                 \
    return "Internal" #name;
    FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_CASE)
#undef DECL_INTERNAL_CASE
  }
  return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
}
668 
// NOTE(review): presumably the number of interpreter steps executed per
// resume before yielding back to the caller — confirm at the use site.
static const int kRunSteps = 1000;
670 
// A helper class to compute the control transfers for each bytecode offset.
// Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to
// be directly executed without the need to dynamically track blocks.
class ControlTransfers : public ZoneObject {
 public:
  // Maps the offset of each branching bytecode to its relative pc delta.
  ControlTransferMap map_;

  ControlTransfers(Zone* zone, BodyLocalDecls* locals, const byte* start,
                   const byte* end)
      : map_(zone) {
    // Represents a control flow label.
    struct CLabel : public ZoneObject {
      const byte* target;            // pc this label is bound to, or null.
      ZoneVector<const byte*> refs;  // forward references awaiting Bind().

      explicit CLabel(Zone* zone) : target(nullptr), refs(zone) {}

      // Bind this label to the given PC.
      void Bind(ControlTransferMap* map, const byte* start, const byte* pc) {
        DCHECK_NULL(target);
        target = pc;
        // Patch every forward reference now that the target pc is known.
        for (auto from_pc : refs) {
          auto pcdiff = static_cast<pcdiff_t>(target - from_pc);
          size_t offset = static_cast<size_t>(from_pc - start);
          (*map)[offset] = pcdiff;
        }
      }

      // Reference this label from the given location.
      void Ref(ControlTransferMap* map, const byte* start,
               const byte* from_pc) {
        if (target) {
          // Target being bound before a reference means this is a loop.
          DCHECK_EQ(kExprLoop, *target);
          auto pcdiff = static_cast<pcdiff_t>(target - from_pc);
          size_t offset = static_cast<size_t>(from_pc - start);
          (*map)[offset] = pcdiff;
        } else {
          refs.push_back(from_pc);
        }
      }
    };

    // An entry in the control stack.
    struct Control {
      const byte* pc;       // pc of the opening block/loop/if opcode.
      CLabel* end_label;    // where a br targeting this construct lands.
      CLabel* else_label;   // pending else target; non-null for 'if' only.

      void Ref(ControlTransferMap* map, const byte* start,
               const byte* from_pc) {
        end_label->Ref(map, start, from_pc);
      }
    };

    // Compute the ControlTransfer map.
    // This algorithm maintains a stack of control constructs similar to the
    // AST decoder. The {control_stack} allows matching {br,br_if,br_table}
    // bytecodes with their target, as well as determining whether the current
    // bytecodes are within the true or false block of an else.
    std::vector<Control> control_stack;
    CLabel* func_label = new (zone) CLabel(zone);
    control_stack.push_back({start, func_label, nullptr});
    for (BytecodeIterator i(start, end, locals); i.has_next(); i.next()) {
      WasmOpcode opcode = i.current();
      TRACE("@%u: control %s\n", i.pc_offset(),
            WasmOpcodes::OpcodeName(opcode));
      switch (opcode) {
        case kExprBlock: {
          TRACE("control @%u: Block\n", i.pc_offset());
          CLabel* label = new (zone) CLabel(zone);
          control_stack.push_back({i.pc(), label, nullptr});
          break;
        }
        case kExprLoop: {
          TRACE("control @%u: Loop\n", i.pc_offset());
          // Loops bind their label immediately: branches to a loop jump
          // backward to its start.
          CLabel* label = new (zone) CLabel(zone);
          control_stack.push_back({i.pc(), label, nullptr});
          label->Bind(&map_, start, i.pc());
          break;
        }
        case kExprIf: {
          TRACE("control @%u: If\n", i.pc_offset());
          // The 'if' itself is a transfer point: when the condition is
          // false, execution jumps to the else (or end) label.
          CLabel* end_label = new (zone) CLabel(zone);
          CLabel* else_label = new (zone) CLabel(zone);
          control_stack.push_back({i.pc(), end_label, else_label});
          else_label->Ref(&map_, start, i.pc());
          break;
        }
        case kExprElse: {
          Control* c = &control_stack.back();
          TRACE("control @%u: Else\n", i.pc_offset());
          // Falling off the true block skips over the else block to the end.
          c->end_label->Ref(&map_, start, i.pc());
          DCHECK_NOT_NULL(c->else_label);
          c->else_label->Bind(&map_, start, i.pc() + 1);
          c->else_label = nullptr;
          break;
        }
        case kExprEnd: {
          Control* c = &control_stack.back();
          TRACE("control @%u: End\n", i.pc_offset());
          if (c->end_label->target) {
            // only loops have bound labels.
            DCHECK_EQ(kExprLoop, *c->pc);
          } else {
            // An 'if' without an else: the else label falls through here.
            if (c->else_label) c->else_label->Bind(&map_, start, i.pc());
            c->end_label->Bind(&map_, start, i.pc() + 1);
          }
          control_stack.pop_back();
          break;
        }
        case kExprBr: {
          BreakDepthOperand operand(&i, i.pc());
          TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), operand.depth);
          Control* c = &control_stack[control_stack.size() - operand.depth - 1];
          c->Ref(&map_, start, i.pc());
          break;
        }
        case kExprBrIf: {
          BreakDepthOperand operand(&i, i.pc());
          TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), operand.depth);
          Control* c = &control_stack[control_stack.size() - operand.depth - 1];
          c->Ref(&map_, start, i.pc());
          break;
        }
        case kExprBrTable: {
          BranchTableOperand operand(&i, i.pc());
          BranchTableIterator iterator(&i, operand);
          TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
                operand.table_count);
          // Each table entry gets its own map slot, keyed at pc + index.
          while (iterator.has_next()) {
            uint32_t j = iterator.cur_index();
            uint32_t target = iterator.next();
            Control* c = &control_stack[control_stack.size() - target - 1];
            c->Ref(&map_, start, i.pc() + j);
          }
          break;
        }
        default: {
          break;
        }
      }
    }
    // The implicit function-level block: bind its label at the end of the
    // body if no explicit 'end' already did.
    if (!func_label->target) func_label->Bind(&map_, start, end);
  }

  // Looks up the relative transfer for the bytecode at {from}; every
  // branching pc must have been recorded during construction.
  pcdiff_t Lookup(pc_t from) {
    auto result = map_.find(from);
    if (result == map_.end()) {
      V8_Fatal(__FILE__, __LINE__, "no control target for pc %zu", from);
    }
    return result->second;
  }
};
825 
826 // Code and metadata needed to execute a function.
struct InterpreterCode {
  const WasmFunction* function;  // wasm function
  BodyLocalDecls locals;         // local declarations
  const byte* orig_start;        // start of original code
  const byte* orig_end;          // end of original code
  byte* start;                   // start of (maybe altered) code
  byte* end;                     // end of (maybe altered) code
  ControlTransfers* targets;     // helper for control flow.

  // Returns a pointer to the instruction at offset {pc} within the
  // (possibly altered) code.
  const byte* at(pc_t pc) { return start + pc; }
};
838 
839 // The main storage for interpreter code. It maps {WasmFunction} to the
840 // metadata needed to execute each function.
class CodeMap {
 public:
  Zone* zone_;
  const WasmModule* module_;
  ZoneVector<InterpreterCode> interpreter_code_;

  // Creates one InterpreterCode entry per function in {module}, pointing
  // into the module bytes at {module_start}. A null {module} leaves the
  // code map empty.
  CodeMap(const WasmModule* module, const uint8_t* module_start, Zone* zone)
      : zone_(zone), module_(module), interpreter_code_(zone) {
    if (module == nullptr) return;
    for (size_t i = 0; i < module->functions.size(); ++i) {
      const WasmFunction* function = &module->functions[i];
      const byte* code_start = module_start + function->code_start_offset;
      const byte* code_end = module_start + function->code_end_offset;
      AddFunction(function, code_start, code_end);
    }
  }

  // Returns the preprocessed code for {function}, or nullptr if its index
  // is out of range.
  InterpreterCode* FindCode(const WasmFunction* function) {
    if (function->func_index < interpreter_code_.size()) {
      InterpreterCode* code = &interpreter_code_[function->func_index];
      DCHECK_EQ(function, code->function);
      return Preprocess(code);
    }
    return nullptr;
  }

  // Returns the preprocessed code for the function at {function_index}.
  // The index must be valid (checked).
  InterpreterCode* GetCode(uint32_t function_index) {
    CHECK_LT(function_index, interpreter_code_.size());
    return Preprocess(&interpreter_code_[function_index]);
  }

  // Resolves an indirect call through table {table_index} at {entry_index}.
  // Returns nullptr if the table, the entry, or the resolved function index
  // is invalid.
  InterpreterCode* GetIndirectCode(uint32_t table_index, uint32_t entry_index) {
    if (table_index >= module_->function_tables.size()) return nullptr;
    const WasmIndirectFunctionTable* table =
        &module_->function_tables[table_index];
    if (entry_index >= table->values.size()) return nullptr;
    uint32_t index = table->values[entry_index];
    if (index >= interpreter_code_.size()) return nullptr;
    return GetCode(index);
  }

  // Lazily computes the control-transfer map and the local declarations for
  // {code} on first use (targets == nullptr means "not yet preprocessed").
  InterpreterCode* Preprocess(InterpreterCode* code) {
    if (code->targets == nullptr && code->start) {
      // Compute the control targets map and the local declarations.
      CHECK(DecodeLocalDecls(&code->locals, code->start, code->end));
      code->targets = new (zone_) ControlTransfers(
          zone_, &code->locals, code->orig_start, code->orig_end);
    }
    return code;
  }

  // Appends an entry for {function}; entries must be added in function
  // index order (checked). Returns the index of the new entry.
  int AddFunction(const WasmFunction* function, const byte* code_start,
                  const byte* code_end) {
    InterpreterCode code = {
        function, BodyLocalDecls(zone_),         code_start,
        code_end, const_cast<byte*>(code_start), const_cast<byte*>(code_end),
        nullptr};

    DCHECK_EQ(interpreter_code_.size(), function->func_index);
    interpreter_code_.push_back(code);
    return static_cast<int>(interpreter_code_.size()) - 1;
  }

  // Replaces the code of {function} and re-runs preprocessing on the new
  // bytes. Returns false if {function} is not in the map.
  bool SetFunctionCode(const WasmFunction* function, const byte* start,
                       const byte* end) {
    InterpreterCode* code = FindCode(function);
    if (code == nullptr) return false;
    code->targets = nullptr;  // force re-preprocessing
    code->orig_start = start;
    code->orig_end = end;
    code->start = const_cast<byte*>(start);
    code->end = const_cast<byte*>(end);
    Preprocess(code);
    return true;
  }
};
917 
918 namespace {
919 // Responsible for executing code directly.
920 class ThreadImpl {
921  public:
  // A thread carries its own value stack, call-frame stack and control-block
  // stack, all allocated from {zone}. {codemap} and {instance} are borrowed
  // pointers.
  ThreadImpl(Zone* zone, CodeMap* codemap, WasmInstance* instance)
      : codemap_(codemap),
        instance_(instance),
        stack_(zone),
        frames_(zone),
        blocks_(zone) {}
928 
929   //==========================================================================
930   // Implementation of public interface for WasmInterpreter::Thread.
931   //==========================================================================
932 
  // Returns the current execution state of this thread.
  WasmInterpreter::State state() { return state_; }
934 
  // Sets up a new outermost call frame for {function}: copies {args} onto
  // the value stack, zero-initializes the declared locals, and pushes the
  // implicit block for the function body. Execution is started afterwards
  // via Run() or Step().
  void PushFrame(const WasmFunction* function, WasmVal* args) {
    InterpreterCode* code = codemap()->FindCode(function);
    CHECK_NOT_NULL(code);
    ++num_interpreted_calls_;
    frames_.push_back({code, 0, 0, stack_.size()});
    for (size_t i = 0; i < function->sig->parameter_count(); ++i) {
      stack_.push_back(args[i]);
    }
    // Start execution right after the local declarations.
    frames_.back().ret_pc = InitLocals(code);
    // Implicit block for the whole body, yielding the function's returns.
    blocks_.push_back(
        {0, stack_.size(), frames_.size(),
         static_cast<uint32_t>(code->function->sig->return_count())});
    TRACE("  => PushFrame(#%u @%zu)\n", code->function->func_index,
          frames_.back().ret_pc);
  }
950 
Run()951   WasmInterpreter::State Run() {
952     do {
953       TRACE("  => Run()\n");
954       if (state_ == WasmInterpreter::STOPPED ||
955           state_ == WasmInterpreter::PAUSED) {
956         state_ = WasmInterpreter::RUNNING;
957         Execute(frames_.back().code, frames_.back().ret_pc, kRunSteps);
958       }
959     } while (state_ == WasmInterpreter::STOPPED);
960     return state_;
961   }
962 
Step()963   WasmInterpreter::State Step() {
964     TRACE("  => Step()\n");
965     if (state_ == WasmInterpreter::STOPPED ||
966         state_ == WasmInterpreter::PAUSED) {
967       state_ = WasmInterpreter::RUNNING;
968       Execute(frames_.back().code, frames_.back().ret_pc, 1);
969     }
970     return state_;
971   }
972 
  // Pausing a running thread is not implemented yet.
  void Pause() { UNIMPLEMENTED(); }
974 
Reset()975   void Reset() {
976     TRACE("----- RESET -----\n");
977     stack_.clear();
978     frames_.clear();
979     state_ = WasmInterpreter::STOPPED;
980     trap_reason_ = kTrapCount;
981     possible_nondeterminism_ = false;
982   }
983 
GetFrameCount()984   int GetFrameCount() {
985     DCHECK_GE(kMaxInt, frames_.size());
986     return static_cast<int>(frames_.size());
987   }
988 
  // Builds an InterpretedFrame describing frame {index} by invoking
  // {frame_cons}(function, pc, sp, llimit).
  template <typename FrameCons>
  InterpretedFrame GetMutableFrame(int index, FrameCons frame_cons) {
    DCHECK_LE(0, index);
    DCHECK_GT(frames_.size(), index);
    Frame* frame = &frames_[index];
    // The InterpretedFrame interface uses ints; check nothing is truncated.
    DCHECK_GE(kMaxInt, frame->ret_pc);
    DCHECK_GE(kMaxInt, frame->sp);
    DCHECK_GE(kMaxInt, frame->llimit());
    return frame_cons(frame->code->function, static_cast<int>(frame->ret_pc),
                      static_cast<int>(frame->sp),
                      static_cast<int>(frame->llimit()));
  }
1001 
  // Returns the {index}-th return value of a finished computation, or a
  // dummy value (0xdeadbeef) if the thread trapped.
  WasmVal GetReturnValue(int index) {
    if (state_ == WasmInterpreter::TRAPPED) return WasmVal(0xdeadbeef);
    CHECK_EQ(WasmInterpreter::FINISHED, state_);
    CHECK_LT(static_cast<size_t>(index), stack_.size());
    return stack_[index];
  }
1008 
  // pc of the most recently hit breakpoint (kInvalidPc if none).
  pc_t GetBreakpointPc() { return break_pc_; }

  // True if a NaN was observed flowing through a store or reinterpret
  // operation during execution.
  bool PossibleNondeterminism() { return possible_nondeterminism_; }

  // Total number of calls interpreted by this thread.
  uint64_t NumInterpretedCalls() { return num_interpreted_calls_; }

  // ORs {flags} (a combination of WasmInterpreter::BreakFlag) into the
  // active break flags.
  void AddBreakFlags(uint8_t flags) { break_flags_ |= flags; }

  // Clears all active break flags.
  void ClearBreakFlags() { break_flags_ = WasmInterpreter::BreakFlag::None; }
1018 
1019  private:
1020   // Entries on the stack of functions being evaluated.
  struct Frame {
    InterpreterCode* code;  // code of the function this frame executes
    pc_t call_pc;           // pc of the call instruction in this frame
    pc_t ret_pc;            // pc at which to resume execution in this frame
    sp_t sp;                // base of this frame on the value stack

    // Limit of parameters.
    sp_t plimit() { return sp + code->function->sig->parameter_count(); }
    // Limit of locals.
    sp_t llimit() { return plimit() + code->locals.type_list.size(); }
  };
1032 
  // A control construct (block/loop/if or implicit function body) that was
  // entered during execution.
  struct Block {
    pc_t pc;         // pc of the opcode that opened the block
    sp_t sp;         // value-stack height when the block was entered
    size_t fp;       // frames_.size() when the block was entered
    unsigned arity;  // number of values the block leaves on the stack
  };
1039 
  CodeMap* codemap_;           // code metadata for all functions
  WasmInstance* instance_;     // instance being executed
  ZoneVector<WasmVal> stack_;  // value stack
  ZoneVector<Frame> frames_;   // call-frame stack
  ZoneVector<Block> blocks_;   // control-block stack
  WasmInterpreter::State state_ = WasmInterpreter::STOPPED;
  pc_t break_pc_ = kInvalidPc;  // pc of the last breakpoint hit
  TrapReason trap_reason_ = kTrapCount;  // kTrapCount means "no trap"
  bool possible_nondeterminism_ = false;  // set when a NaN is observed
  uint8_t break_flags_ = 0;  // a combination of WasmInterpreter::BreakFlag
  uint64_t num_interpreted_calls_ = 0;  // incremented on every frame push
1051 
  // Convenience accessors.
  CodeMap* codemap() { return codemap_; }
  WasmInstance* instance() { return instance_; }
  const WasmModule* module() { return instance_->module; }
1055 
  // Transitions the thread into the TRAPPED state, recording the trap
  // reason and the pc at which the trap occurred.
  void DoTrap(TrapReason trap, pc_t pc) {
    state_ = WasmInterpreter::TRAPPED;
    trap_reason_ = trap;
    CommitPc(pc);
  }
1061 
  // Push a frame with arguments already on the stack. {call_pc} is the pc
  // of the call instruction in the caller; {ret_pc} is where the caller
  // resumes after the call.
  void PushFrame(InterpreterCode* code, pc_t call_pc, pc_t ret_pc) {
    CHECK_NOT_NULL(code);
    DCHECK(!frames_.empty());
    ++num_interpreted_calls_;
    // Record the caller's resume position before pushing the new frame.
    frames_.back().call_pc = call_pc;
    frames_.back().ret_pc = ret_pc;
    size_t arity = code->function->sig->parameter_count();
    DCHECK_GE(stack_.size(), arity);
    // The parameters will overlap the arguments already on the stack.
    frames_.push_back({code, 0, 0, stack_.size() - arity});
    // Implicit block for the callee's body, yielding its return values.
    blocks_.push_back(
        {0, stack_.size(), frames_.size(),
         static_cast<uint32_t>(code->function->sig->return_count())});
    // Execution starts after the local declarations, with locals zeroed.
    frames_.back().ret_pc = InitLocals(code);
    TRACE("  => push func#%u @%zu\n", code->function->func_index,
          frames_.back().ret_pc);
  }
1080 
InitLocals(InterpreterCode * code)1081   pc_t InitLocals(InterpreterCode* code) {
1082     for (auto p : code->locals.type_list) {
1083       WasmVal val;
1084       switch (p) {
1085         case kWasmI32:
1086           val = WasmVal(static_cast<int32_t>(0));
1087           break;
1088         case kWasmI64:
1089           val = WasmVal(static_cast<int64_t>(0));
1090           break;
1091         case kWasmF32:
1092           val = WasmVal(static_cast<float>(0));
1093           break;
1094         case kWasmF64:
1095           val = WasmVal(static_cast<double>(0));
1096           break;
1097         default:
1098           UNREACHABLE();
1099           break;
1100       }
1101       stack_.push_back(val);
1102     }
1103     return code->locals.encoded_size;
1104   }
1105 
CommitPc(pc_t pc)1106   void CommitPc(pc_t pc) {
1107     if (!frames_.empty()) {
1108       frames_.back().ret_pc = pc;
1109     }
1110   }
1111 
SkipBreakpoint(InterpreterCode * code,pc_t pc)1112   bool SkipBreakpoint(InterpreterCode* code, pc_t pc) {
1113     if (pc == break_pc_) {
1114       // Skip the previously hit breakpoint when resuming.
1115       break_pc_ = kInvalidPc;
1116       return true;
1117     }
1118     return false;
1119   }
1120 
  // Returns the control-transfer delta for the instruction at {pc}.
  int LookupTarget(InterpreterCode* code, pc_t pc) {
    return static_cast<int>(code->targets->Lookup(pc));
  }
1124 
  // Performs a branch out of {depth} enclosing blocks: transfers the target
  // block's result values down the stack, discards the target block and
  // everything above it, and returns the pc delta to the branch target.
  int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
    size_t bp = blocks_.size() - depth - 1;
    Block* target = &blocks_[bp];
    DoStackTransfer(target->sp, target->arity);
    blocks_.resize(bp);
    return LookupTarget(code, pc);
  }
1132 
  // Pops the current frame (and its control blocks), transferring {arity}
  // return values down the stack. On return to a caller frame, updates
  // *code/*pc/*limit to resume there and returns true; returns false when
  // the last frame was popped (execution finished).
  bool DoReturn(InterpreterCode** code, pc_t* pc, pc_t* limit, size_t arity) {
    DCHECK_GT(frames_.size(), 0);
    // Pop all blocks for this frame.
    while (!blocks_.empty() && blocks_.back().fp == frames_.size()) {
      blocks_.pop_back();
    }

    sp_t dest = frames_.back().sp;
    frames_.pop_back();
    if (frames_.size() == 0) {
      // A return from the last frame terminates the execution.
      state_ = WasmInterpreter::FINISHED;
      DoStackTransfer(0, arity);
      TRACE("  => finish\n");
      return false;
    } else {
      // Return to caller frame.
      Frame* top = &frames_.back();
      *code = top->code;
      *pc = top->ret_pc;
      *limit = top->code->end - top->code->start;
      TRACE("  => pop func#%u @%zu\n", (*code)->function->func_index, *pc);
      DoStackTransfer(dest, arity);
      return true;
    }
  }
1159 
  // Pushes a frame for {target} (recording {ret_pc} as the caller's resume
  // point) and updates *pc/*limit to start executing the callee's body.
  void DoCall(InterpreterCode* target, pc_t* pc, pc_t ret_pc, pc_t* limit) {
    PushFrame(target, *pc, ret_pc);
    *pc = frames_.back().ret_pc;
    *limit = target->end - target->start;
  }
1165 
1166   // Copies {arity} values on the top of the stack down the stack to {dest},
1167   // dropping the values in-between.
DoStackTransfer(sp_t dest,size_t arity)1168   void DoStackTransfer(sp_t dest, size_t arity) {
1169     // before: |---------------| pop_count | arity |
1170     //         ^ 0             ^ dest              ^ stack_.size()
1171     //
1172     // after:  |---------------| arity |
1173     //         ^ 0                     ^ stack_.size()
1174     DCHECK_LE(dest, stack_.size());
1175     DCHECK_LE(dest + arity, stack_.size());
1176     size_t pop_count = stack_.size() - dest - arity;
1177     for (size_t i = 0; i < arity; i++) {
1178       stack_[dest + i] = stack_[dest + pop_count + i];
1179     }
1180     stack_.resize(stack_.size() - pop_count);
1181   }
1182 
1183   template <typename ctype, typename mtype>
ExecuteLoad(Decoder * decoder,InterpreterCode * code,pc_t pc,int & len)1184   bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len) {
1185     MemoryAccessOperand operand(decoder, code->at(pc), sizeof(ctype));
1186     uint32_t index = Pop().to<uint32_t>();
1187     size_t effective_mem_size = instance()->mem_size - sizeof(mtype);
1188     if (operand.offset > effective_mem_size ||
1189         index > (effective_mem_size - operand.offset)) {
1190       DoTrap(kTrapMemOutOfBounds, pc);
1191       return false;
1192     }
1193     byte* addr = instance()->mem_start + operand.offset + index;
1194     WasmVal result(static_cast<ctype>(ReadLittleEndianValue<mtype>(addr)));
1195 
1196     Push(pc, result);
1197     len = 1 + operand.length;
1198     return true;
1199   }
1200 
1201   template <typename ctype, typename mtype>
ExecuteStore(Decoder * decoder,InterpreterCode * code,pc_t pc,int & len)1202   bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
1203                     int& len) {
1204     MemoryAccessOperand operand(decoder, code->at(pc), sizeof(ctype));
1205     WasmVal val = Pop();
1206 
1207     uint32_t index = Pop().to<uint32_t>();
1208     size_t effective_mem_size = instance()->mem_size - sizeof(mtype);
1209     if (operand.offset > effective_mem_size ||
1210         index > (effective_mem_size - operand.offset)) {
1211       DoTrap(kTrapMemOutOfBounds, pc);
1212       return false;
1213     }
1214     byte* addr = instance()->mem_start + operand.offset + index;
1215     WriteLittleEndianValue<mtype>(addr, static_cast<mtype>(val.to<ctype>()));
1216     len = 1 + operand.length;
1217 
1218     if (std::is_same<float, ctype>::value) {
1219       possible_nondeterminism_ |= std::isnan(val.to<float>());
1220     } else if (std::is_same<double, ctype>::value) {
1221       possible_nondeterminism_ |= std::isnan(val.to<double>());
1222     }
1223     return true;
1224   }
1225 
Execute(InterpreterCode * code,pc_t pc,int max)1226   void Execute(InterpreterCode* code, pc_t pc, int max) {
1227     Decoder decoder(code->start, code->end);
1228     pc_t limit = code->end - code->start;
1229     while (--max >= 0) {
1230 #define PAUSE_IF_BREAK_FLAG(flag) \
1231   if (V8_UNLIKELY(break_flags_ & WasmInterpreter::BreakFlag::flag)) max = 0;
1232 
1233       DCHECK_GT(limit, pc);
1234 
1235       const char* skip = "        ";
1236       int len = 1;
1237       byte opcode = code->start[pc];
1238       byte orig = opcode;
1239       if (V8_UNLIKELY(opcode == kInternalBreakpoint)) {
1240         orig = code->orig_start[pc];
1241         if (SkipBreakpoint(code, pc)) {
1242           // skip breakpoint by switching on original code.
1243           skip = "[skip]  ";
1244         } else {
1245           TRACE("@%-3zu: [break] %-24s:", pc,
1246                 WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
1247           TraceValueStack();
1248           TRACE("\n");
1249           break;
1250         }
1251       }
1252 
1253       USE(skip);
1254       TRACE("@%-3zu: %s%-24s:", pc, skip,
1255             WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
1256       TraceValueStack();
1257       TRACE("\n");
1258 
1259       switch (orig) {
1260         case kExprNop:
1261           break;
1262         case kExprBlock: {
1263           BlockTypeOperand operand(&decoder, code->at(pc));
1264           blocks_.push_back({pc, stack_.size(), frames_.size(), operand.arity});
1265           len = 1 + operand.length;
1266           break;
1267         }
1268         case kExprLoop: {
1269           BlockTypeOperand operand(&decoder, code->at(pc));
1270           blocks_.push_back({pc, stack_.size(), frames_.size(), 0});
1271           len = 1 + operand.length;
1272           break;
1273         }
1274         case kExprIf: {
1275           BlockTypeOperand operand(&decoder, code->at(pc));
1276           WasmVal cond = Pop();
1277           bool is_true = cond.to<uint32_t>() != 0;
1278           blocks_.push_back({pc, stack_.size(), frames_.size(), operand.arity});
1279           if (is_true) {
1280             // fall through to the true block.
1281             len = 1 + operand.length;
1282             TRACE("  true => fallthrough\n");
1283           } else {
1284             len = LookupTarget(code, pc);
1285             TRACE("  false => @%zu\n", pc + len);
1286           }
1287           break;
1288         }
1289         case kExprElse: {
1290           blocks_.pop_back();
1291           len = LookupTarget(code, pc);
1292           TRACE("  end => @%zu\n", pc + len);
1293           break;
1294         }
1295         case kExprSelect: {
1296           WasmVal cond = Pop();
1297           WasmVal fval = Pop();
1298           WasmVal tval = Pop();
1299           Push(pc, cond.to<int32_t>() != 0 ? tval : fval);
1300           break;
1301         }
1302         case kExprBr: {
1303           BreakDepthOperand operand(&decoder, code->at(pc));
1304           len = DoBreak(code, pc, operand.depth);
1305           TRACE("  br => @%zu\n", pc + len);
1306           break;
1307         }
1308         case kExprBrIf: {
1309           BreakDepthOperand operand(&decoder, code->at(pc));
1310           WasmVal cond = Pop();
1311           bool is_true = cond.to<uint32_t>() != 0;
1312           if (is_true) {
1313             len = DoBreak(code, pc, operand.depth);
1314             TRACE("  br_if => @%zu\n", pc + len);
1315           } else {
1316             TRACE("  false => fallthrough\n");
1317             len = 1 + operand.length;
1318           }
1319           break;
1320         }
1321         case kExprBrTable: {
1322           BranchTableOperand operand(&decoder, code->at(pc));
1323           BranchTableIterator iterator(&decoder, operand);
1324           uint32_t key = Pop().to<uint32_t>();
1325           uint32_t depth = 0;
1326           if (key >= operand.table_count) key = operand.table_count;
1327           for (uint32_t i = 0; i <= key; i++) {
1328             DCHECK(iterator.has_next());
1329             depth = iterator.next();
1330           }
1331           len = key + DoBreak(code, pc + key, static_cast<size_t>(depth));
1332           TRACE("  br[%u] => @%zu\n", key, pc + key + len);
1333           break;
1334         }
1335         case kExprReturn: {
1336           size_t arity = code->function->sig->return_count();
1337           if (!DoReturn(&code, &pc, &limit, arity)) return;
1338           decoder.Reset(code->start, code->end);
1339           PAUSE_IF_BREAK_FLAG(AfterReturn);
1340           continue;
1341         }
1342         case kExprUnreachable: {
1343           DoTrap(kTrapUnreachable, pc);
1344           return CommitPc(pc);
1345         }
1346         case kExprEnd: {
1347           blocks_.pop_back();
1348           break;
1349         }
1350         case kExprI32Const: {
1351           ImmI32Operand operand(&decoder, code->at(pc));
1352           Push(pc, WasmVal(operand.value));
1353           len = 1 + operand.length;
1354           break;
1355         }
1356         case kExprI64Const: {
1357           ImmI64Operand operand(&decoder, code->at(pc));
1358           Push(pc, WasmVal(operand.value));
1359           len = 1 + operand.length;
1360           break;
1361         }
1362         case kExprF32Const: {
1363           ImmF32Operand operand(&decoder, code->at(pc));
1364           Push(pc, WasmVal(operand.value));
1365           len = 1 + operand.length;
1366           break;
1367         }
1368         case kExprF64Const: {
1369           ImmF64Operand operand(&decoder, code->at(pc));
1370           Push(pc, WasmVal(operand.value));
1371           len = 1 + operand.length;
1372           break;
1373         }
1374         case kExprGetLocal: {
1375           LocalIndexOperand operand(&decoder, code->at(pc));
1376           Push(pc, stack_[frames_.back().sp + operand.index]);
1377           len = 1 + operand.length;
1378           break;
1379         }
1380         case kExprSetLocal: {
1381           LocalIndexOperand operand(&decoder, code->at(pc));
1382           WasmVal val = Pop();
1383           stack_[frames_.back().sp + operand.index] = val;
1384           len = 1 + operand.length;
1385           break;
1386         }
1387         case kExprTeeLocal: {
1388           LocalIndexOperand operand(&decoder, code->at(pc));
1389           WasmVal val = Pop();
1390           stack_[frames_.back().sp + operand.index] = val;
1391           Push(pc, val);
1392           len = 1 + operand.length;
1393           break;
1394         }
1395         case kExprDrop: {
1396           Pop();
1397           break;
1398         }
1399         case kExprCallFunction: {
1400           CallFunctionOperand operand(&decoder, code->at(pc));
1401           InterpreterCode* target = codemap()->GetCode(operand.index);
1402           DoCall(target, &pc, pc + 1 + operand.length, &limit);
1403           code = target;
1404           decoder.Reset(code->start, code->end);
1405           PAUSE_IF_BREAK_FLAG(AfterCall);
1406           continue;
1407         }
1408         case kExprCallIndirect: {
1409           CallIndirectOperand operand(&decoder, code->at(pc));
1410           uint32_t entry_index = Pop().to<uint32_t>();
1411           // Assume only one table for now.
1412           DCHECK_LE(module()->function_tables.size(), 1u);
1413           InterpreterCode* target = codemap()->GetIndirectCode(0, entry_index);
1414           if (target == nullptr) {
1415             return DoTrap(kTrapFuncInvalid, pc);
1416           } else if (target->function->sig_index != operand.index) {
1417             // If not an exact match, we have to do a canonical check.
1418             // TODO(titzer): make this faster with some kind of caching?
1419             const WasmIndirectFunctionTable* table =
1420                 &module()->function_tables[0];
1421             int function_key = table->map.Find(target->function->sig);
1422             if (function_key < 0 ||
1423                 (function_key !=
1424                  table->map.Find(module()->signatures[operand.index]))) {
1425               return DoTrap(kTrapFuncSigMismatch, pc);
1426             }
1427           }
1428 
1429           DoCall(target, &pc, pc + 1 + operand.length, &limit);
1430           code = target;
1431           decoder.Reset(code->start, code->end);
1432           PAUSE_IF_BREAK_FLAG(AfterCall);
1433           continue;
1434         }
1435         case kExprGetGlobal: {
1436           GlobalIndexOperand operand(&decoder, code->at(pc));
1437           const WasmGlobal* global = &module()->globals[operand.index];
1438           byte* ptr = instance()->globals_start + global->offset;
1439           ValueType type = global->type;
1440           WasmVal val;
1441           if (type == kWasmI32) {
1442             val = WasmVal(*reinterpret_cast<int32_t*>(ptr));
1443           } else if (type == kWasmI64) {
1444             val = WasmVal(*reinterpret_cast<int64_t*>(ptr));
1445           } else if (type == kWasmF32) {
1446             val = WasmVal(*reinterpret_cast<float*>(ptr));
1447           } else if (type == kWasmF64) {
1448             val = WasmVal(*reinterpret_cast<double*>(ptr));
1449           } else {
1450             UNREACHABLE();
1451           }
1452           Push(pc, val);
1453           len = 1 + operand.length;
1454           break;
1455         }
1456         case kExprSetGlobal: {
1457           GlobalIndexOperand operand(&decoder, code->at(pc));
1458           const WasmGlobal* global = &module()->globals[operand.index];
1459           byte* ptr = instance()->globals_start + global->offset;
1460           ValueType type = global->type;
1461           WasmVal val = Pop();
1462           if (type == kWasmI32) {
1463             *reinterpret_cast<int32_t*>(ptr) = val.to<int32_t>();
1464           } else if (type == kWasmI64) {
1465             *reinterpret_cast<int64_t*>(ptr) = val.to<int64_t>();
1466           } else if (type == kWasmF32) {
1467             *reinterpret_cast<float*>(ptr) = val.to<float>();
1468           } else if (type == kWasmF64) {
1469             *reinterpret_cast<double*>(ptr) = val.to<double>();
1470           } else {
1471             UNREACHABLE();
1472           }
1473           len = 1 + operand.length;
1474           break;
1475         }
1476 
1477 #define LOAD_CASE(name, ctype, mtype)                                \
1478   case kExpr##name: {                                                \
1479     if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len)) return; \
1480     break;                                                           \
1481   }
1482 
1483           LOAD_CASE(I32LoadMem8S, int32_t, int8_t);
1484           LOAD_CASE(I32LoadMem8U, int32_t, uint8_t);
1485           LOAD_CASE(I32LoadMem16S, int32_t, int16_t);
1486           LOAD_CASE(I32LoadMem16U, int32_t, uint16_t);
1487           LOAD_CASE(I64LoadMem8S, int64_t, int8_t);
1488           LOAD_CASE(I64LoadMem8U, int64_t, uint8_t);
1489           LOAD_CASE(I64LoadMem16S, int64_t, int16_t);
1490           LOAD_CASE(I64LoadMem16U, int64_t, uint16_t);
1491           LOAD_CASE(I64LoadMem32S, int64_t, int32_t);
1492           LOAD_CASE(I64LoadMem32U, int64_t, uint32_t);
1493           LOAD_CASE(I32LoadMem, int32_t, int32_t);
1494           LOAD_CASE(I64LoadMem, int64_t, int64_t);
1495           LOAD_CASE(F32LoadMem, float, float);
1496           LOAD_CASE(F64LoadMem, double, double);
1497 #undef LOAD_CASE
1498 
1499 #define STORE_CASE(name, ctype, mtype)                                \
1500   case kExpr##name: {                                                 \
1501     if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len)) return; \
1502     break;                                                            \
1503   }
1504 
1505           STORE_CASE(I32StoreMem8, int32_t, int8_t);
1506           STORE_CASE(I32StoreMem16, int32_t, int16_t);
1507           STORE_CASE(I64StoreMem8, int64_t, int8_t);
1508           STORE_CASE(I64StoreMem16, int64_t, int16_t);
1509           STORE_CASE(I64StoreMem32, int64_t, int32_t);
1510           STORE_CASE(I32StoreMem, int32_t, int32_t);
1511           STORE_CASE(I64StoreMem, int64_t, int64_t);
1512           STORE_CASE(F32StoreMem, float, float);
1513           STORE_CASE(F64StoreMem, double, double);
1514 #undef STORE_CASE
1515 
1516 #define ASMJS_LOAD_CASE(name, ctype, mtype, defval)                 \
1517   case kExpr##name: {                                               \
1518     uint32_t index = Pop().to<uint32_t>();                          \
1519     ctype result;                                                   \
1520     if (index >= (instance()->mem_size - sizeof(mtype))) {          \
1521       result = defval;                                              \
1522     } else {                                                        \
1523       byte* addr = instance()->mem_start + index;                   \
1524       /* TODO(titzer): alignment for asmjs load mem? */             \
1525       result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
1526     }                                                               \
1527     Push(pc, WasmVal(result));                                      \
1528     break;                                                          \
1529   }
1530           ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
1531           ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
1532           ASMJS_LOAD_CASE(I32AsmjsLoadMem16S, int32_t, int16_t, 0);
1533           ASMJS_LOAD_CASE(I32AsmjsLoadMem16U, int32_t, uint16_t, 0);
1534           ASMJS_LOAD_CASE(I32AsmjsLoadMem, int32_t, int32_t, 0);
1535           ASMJS_LOAD_CASE(F32AsmjsLoadMem, float, float,
1536                           std::numeric_limits<float>::quiet_NaN());
1537           ASMJS_LOAD_CASE(F64AsmjsLoadMem, double, double,
1538                           std::numeric_limits<double>::quiet_NaN());
1539 #undef ASMJS_LOAD_CASE
1540 
1541 #define ASMJS_STORE_CASE(name, ctype, mtype)                                   \
1542   case kExpr##name: {                                                          \
1543     WasmVal val = Pop();                                                       \
1544     uint32_t index = Pop().to<uint32_t>();                                     \
1545     if (index < (instance()->mem_size - sizeof(mtype))) {                      \
1546       byte* addr = instance()->mem_start + index;                              \
1547       /* TODO(titzer): alignment for asmjs store mem? */                       \
1548       *(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
1549     }                                                                          \
1550     Push(pc, val);                                                             \
1551     break;                                                                     \
1552   }
1553 
1554           ASMJS_STORE_CASE(I32AsmjsStoreMem8, int32_t, int8_t);
1555           ASMJS_STORE_CASE(I32AsmjsStoreMem16, int32_t, int16_t);
1556           ASMJS_STORE_CASE(I32AsmjsStoreMem, int32_t, int32_t);
1557           ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
1558           ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
1559 #undef ASMJS_STORE_CASE
1560         case kExprGrowMemory: {
1561           MemoryIndexOperand operand(&decoder, code->at(pc));
1562           uint32_t delta_pages = Pop().to<uint32_t>();
1563           Push(pc, WasmVal(ExecuteGrowMemory(delta_pages, instance())));
1564           len = 1 + operand.length;
1565           break;
1566         }
1567         case kExprMemorySize: {
1568           MemoryIndexOperand operand(&decoder, code->at(pc));
1569           Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size /
1570                                                  WasmModule::kPageSize)));
1571           len = 1 + operand.length;
1572           break;
1573         }
1574         // We need to treat kExprI32ReinterpretF32 and kExprI64ReinterpretF64
1575         // specially to guarantee that the quiet bit of a NaN is preserved on
1576         // ia32 by the reinterpret casts.
1577         case kExprI32ReinterpretF32: {
1578           WasmVal val = Pop();
1579           WasmVal result(ExecuteI32ReinterpretF32(val));
1580           Push(pc, result);
1581           possible_nondeterminism_ |= std::isnan(val.to<float>());
1582           break;
1583         }
1584         case kExprI64ReinterpretF64: {
1585           WasmVal val = Pop();
1586           WasmVal result(ExecuteI64ReinterpretF64(val));
1587           Push(pc, result);
1588           possible_nondeterminism_ |= std::isnan(val.to<double>());
1589           break;
1590         }
1591 #define EXECUTE_SIMPLE_BINOP(name, ctype, op)             \
1592   case kExpr##name: {                                     \
1593     WasmVal rval = Pop();                                 \
1594     WasmVal lval = Pop();                                 \
1595     WasmVal result(lval.to<ctype>() op rval.to<ctype>()); \
1596     Push(pc, result);                                     \
1597     break;                                                \
1598   }
1599           FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
1600 #undef EXECUTE_SIMPLE_BINOP
1601 
1602 #define EXECUTE_OTHER_BINOP(name, ctype)              \
1603   case kExpr##name: {                                 \
1604     TrapReason trap = kTrapCount;                     \
1605     volatile ctype rval = Pop().to<ctype>();          \
1606     volatile ctype lval = Pop().to<ctype>();          \
1607     WasmVal result(Execute##name(lval, rval, &trap)); \
1608     if (trap != kTrapCount) return DoTrap(trap, pc);  \
1609     Push(pc, result);                                 \
1610     break;                                            \
1611   }
1612           FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
1613 #undef EXECUTE_OTHER_BINOP
1614 
1615         case kExprF32CopySign: {
1616           // Handle kExprF32CopySign separately because it may introduce
1617           // observable non-determinism.
1618           TrapReason trap = kTrapCount;
1619           volatile float rval = Pop().to<float>();
1620           volatile float lval = Pop().to<float>();
1621           WasmVal result(ExecuteF32CopySign(lval, rval, &trap));
1622           Push(pc, result);
1623           possible_nondeterminism_ |= std::isnan(rval);
1624           break;
1625         }
1626         case kExprF64CopySign: {
          // Handle kExprF64CopySign separately because it may introduce
1628           // observable non-determinism.
1629           TrapReason trap = kTrapCount;
1630           volatile double rval = Pop().to<double>();
1631           volatile double lval = Pop().to<double>();
1632           WasmVal result(ExecuteF64CopySign(lval, rval, &trap));
1633           Push(pc, result);
1634           possible_nondeterminism_ |= std::isnan(rval);
1635           break;
1636         }
1637 #define EXECUTE_OTHER_UNOP(name, ctype)              \
1638   case kExpr##name: {                                \
1639     TrapReason trap = kTrapCount;                    \
1640     volatile ctype val = Pop().to<ctype>();          \
1641     WasmVal result(Execute##name(val, &trap));       \
1642     if (trap != kTrapCount) return DoTrap(trap, pc); \
1643     Push(pc, result);                                \
1644     break;                                           \
1645   }
1646           FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
1647 #undef EXECUTE_OTHER_UNOP
1648 
1649         default:
1650           V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
1651                    code->start[pc], OpcodeName(code->start[pc]));
1652           UNREACHABLE();
1653       }
1654 
1655       pc += len;
1656       if (pc == limit) {
1657         // Fell off end of code; do an implicit return.
1658         TRACE("@%-3zu: ImplicitReturn\n", pc);
1659         if (!DoReturn(&code, &pc, &limit, code->function->sig->return_count()))
1660           return;
1661         decoder.Reset(code->start, code->end);
1662         PAUSE_IF_BREAK_FLAG(AfterReturn);
1663       }
1664     }
1665     // Set break_pc_, even though we might have stopped because max was reached.
1666     // We don't want to stop after executing zero instructions next time.
1667     break_pc_ = pc;
1668     state_ = WasmInterpreter::PAUSED;
1669     CommitPc(pc);
1670   }
1671 
Pop()1672   WasmVal Pop() {
1673     DCHECK_GT(stack_.size(), 0);
1674     DCHECK_GT(frames_.size(), 0);
1675     DCHECK_GT(stack_.size(), frames_.back().llimit());  // can't pop into locals
1676     WasmVal val = stack_.back();
1677     stack_.pop_back();
1678     return val;
1679   }
1680 
PopN(int n)1681   void PopN(int n) {
1682     DCHECK_GE(stack_.size(), n);
1683     DCHECK_GT(frames_.size(), 0);
1684     size_t nsize = stack_.size() - n;
1685     DCHECK_GE(nsize, frames_.back().llimit());  // can't pop into locals
1686     stack_.resize(nsize);
1687   }
1688 
PopArity(size_t arity)1689   WasmVal PopArity(size_t arity) {
1690     if (arity == 0) return WasmVal();
1691     CHECK_EQ(1, arity);
1692     return Pop();
1693   }
1694 
Push(pc_t pc,WasmVal val)1695   void Push(pc_t pc, WasmVal val) {
1696     // TODO(titzer): store PC as well?
1697     if (val.type != kWasmStmt) stack_.push_back(val);
1698   }
1699 
TraceStack(const char * phase,pc_t pc)1700   void TraceStack(const char* phase, pc_t pc) {
1701     if (FLAG_trace_wasm_interpreter) {
1702       PrintF("%s @%zu", phase, pc);
1703       UNIMPLEMENTED();
1704       PrintF("\n");
1705     }
1706   }
1707 
TraceValueStack()1708   void TraceValueStack() {
1709 #ifdef DEBUG
1710     Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
1711     sp_t sp = top ? top->sp : 0;
1712     sp_t plimit = top ? top->plimit() : 0;
1713     sp_t llimit = top ? top->llimit() : 0;
1714     if (FLAG_trace_wasm_interpreter) {
1715       for (size_t i = sp; i < stack_.size(); ++i) {
1716         if (i < plimit)
1717           PrintF(" p%zu:", i);
1718         else if (i < llimit)
1719           PrintF(" l%zu:", i);
1720         else
1721           PrintF(" s%zu:", i);
1722         WasmVal val = stack_[i];
1723         switch (val.type) {
1724           case kWasmI32:
1725             PrintF("i32:%d", val.to<int32_t>());
1726             break;
1727           case kWasmI64:
1728             PrintF("i64:%" PRId64 "", val.to<int64_t>());
1729             break;
1730           case kWasmF32:
1731             PrintF("f32:%f", val.to<float>());
1732             break;
1733           case kWasmF64:
1734             PrintF("f64:%lf", val.to<double>());
1735             break;
1736           case kWasmStmt:
1737             PrintF("void");
1738             break;
1739           default:
1740             UNREACHABLE();
1741             break;
1742         }
1743       }
1744     }
1745 #endif  // DEBUG
1746   }
1747 };
1748 
1749 // Converters between WasmInterpreter::Thread and WasmInterpreter::ThreadImpl.
1750 // Thread* is the public interface, without knowledge of the object layout.
1751 // This cast is potentially risky, but as long as we always cast it back before
1752 // accessing any data, it should be fine. UBSan is not complaining.
// Convert an implementation pointer into the opaque public handle.
WasmInterpreter::Thread* ToThread(ThreadImpl* impl) {
  return reinterpret_cast<WasmInterpreter::Thread*>(impl);
}
// Convert the opaque public handle back into the implementation pointer.
static ThreadImpl* ToImpl(WasmInterpreter::Thread* thread) {
  return reinterpret_cast<ThreadImpl*>(thread);
}
1759 }  // namespace
1760 
1761 //============================================================================
1762 // Implementation of the pimpl idiom for WasmInterpreter::Thread.
1763 // Instead of placing a pointer to the ThreadImpl inside of the Thread object,
1764 // we just reinterpret_cast them. ThreadImpls are only allocated inside this
1765 // translation unit anyway.
1766 //============================================================================
// Each Thread method simply forwards to the corresponding ThreadImpl method.
WasmInterpreter::State WasmInterpreter::Thread::state() {
  return ToImpl(this)->state();
}
void WasmInterpreter::Thread::PushFrame(const WasmFunction* function,
                                        WasmVal* args) {
  return ToImpl(this)->PushFrame(function, args);
}
WasmInterpreter::State WasmInterpreter::Thread::Run() {
  return ToImpl(this)->Run();
}
WasmInterpreter::State WasmInterpreter::Thread::Step() {
  return ToImpl(this)->Step();
}
void WasmInterpreter::Thread::Pause() { return ToImpl(this)->Pause(); }
void WasmInterpreter::Thread::Reset() { return ToImpl(this)->Reset(); }
pc_t WasmInterpreter::Thread::GetBreakpointPc() {
  return ToImpl(this)->GetBreakpointPc();
}
int WasmInterpreter::Thread::GetFrameCount() {
  return ToImpl(this)->GetFrameCount();
}
// The const frame view is produced via the mutable accessor below.
const InterpretedFrame WasmInterpreter::Thread::GetFrame(int index) {
  return GetMutableFrame(index);
}
InterpretedFrame WasmInterpreter::Thread::GetMutableFrame(int index) {
  // We have access to the constructor of InterpretedFrame, but ThreadImpl has
  // not. So pass it as a lambda (should all get inlined).
  auto frame_cons = [](const WasmFunction* function, int pc, int fp, int sp) {
    return InterpretedFrame(function, pc, fp, sp);
  };
  return ToImpl(this)->GetMutableFrame(index, frame_cons);
}
WasmVal WasmInterpreter::Thread::GetReturnValue(int index) {
  return ToImpl(this)->GetReturnValue(index);
}
bool WasmInterpreter::Thread::PossibleNondeterminism() {
  return ToImpl(this)->PossibleNondeterminism();
}
uint64_t WasmInterpreter::Thread::NumInterpretedCalls() {
  return ToImpl(this)->NumInterpretedCalls();
}
void WasmInterpreter::Thread::AddBreakFlags(uint8_t flags) {
  ToImpl(this)->AddBreakFlags(flags);
}
void WasmInterpreter::Thread::ClearBreakFlags() {
  ToImpl(this)->ClearBreakFlags();
}
1814 
1815 //============================================================================
1816 // The implementation details of the interpreter.
1817 //============================================================================
// Zone-allocated state shared by the public WasmInterpreter facade: the
// instance, a private copy of the wire bytes, the code map, and the threads.
class WasmInterpreterInternals : public ZoneObject {
 public:
  WasmInstance* instance_;
  // Create a copy of the module bytes for the interpreter, since the passed
  // pointer might be invalidated after constructing the interpreter.
  const ZoneVector<uint8_t> module_bytes_;
  CodeMap codemap_;
  ZoneVector<ThreadImpl> threads_;

  WasmInterpreterInternals(Zone* zone, const ModuleBytesEnv& env)
      : instance_(env.module_env.instance),
        module_bytes_(env.wire_bytes.start(), env.wire_bytes.end(), zone),
        codemap_(
            // The instance may be null (e.g. for testing setups); then the
            // code map is created without a module.
            env.module_env.instance ? env.module_env.instance->module : nullptr,
            module_bytes_.data(), zone),
        threads_(zone) {
    // A single interpreter thread is created eagerly.
    threads_.emplace_back(zone, &codemap_, env.module_env.instance);
  }

  // Tear down the threads; the zone reclaims everything else.
  void Delete() { threads_.clear(); }
};
1839 
1840 //============================================================================
1841 // Implementation of the public interface of the interpreter.
1842 //============================================================================
// The internals object is placement-new'ed into the interpreter's zone, so it
// is released together with the zone rather than deleted explicitly.
WasmInterpreter::WasmInterpreter(const ModuleBytesEnv& env,
                                 AccountingAllocator* allocator)
    : zone_(allocator, ZONE_NAME),
      internals_(new (&zone_) WasmInterpreterInternals(&zone_, env)) {}

// Only the threads need eager teardown (see WasmInterpreterInternals::Delete).
WasmInterpreter::~WasmInterpreter() { internals_->Delete(); }

// Run/Pause act on the single interpreter thread (see GetThreadCount).
void WasmInterpreter::Run() { internals_->threads_[0].Run(); }

void WasmInterpreter::Pause() { internals_->threads_[0].Pause(); }
1853 
SetBreakpoint(const WasmFunction * function,pc_t pc,bool enabled)1854 bool WasmInterpreter::SetBreakpoint(const WasmFunction* function, pc_t pc,
1855                                     bool enabled) {
1856   InterpreterCode* code = internals_->codemap_.FindCode(function);
1857   if (!code) return false;
1858   size_t size = static_cast<size_t>(code->end - code->start);
1859   // Check bounds for {pc}.
1860   if (pc < code->locals.encoded_size || pc >= size) return false;
1861   // Make a copy of the code before enabling a breakpoint.
1862   if (enabled && code->orig_start == code->start) {
1863     code->start = reinterpret_cast<byte*>(zone_.New(size));
1864     memcpy(code->start, code->orig_start, size);
1865     code->end = code->start + size;
1866   }
1867   bool prev = code->start[pc] == kInternalBreakpoint;
1868   if (enabled) {
1869     code->start[pc] = kInternalBreakpoint;
1870   } else {
1871     code->start[pc] = code->orig_start[pc];
1872   }
1873   return prev;
1874 }
1875 
GetBreakpoint(const WasmFunction * function,pc_t pc)1876 bool WasmInterpreter::GetBreakpoint(const WasmFunction* function, pc_t pc) {
1877   InterpreterCode* code = internals_->codemap_.FindCode(function);
1878   if (!code) return false;
1879   size_t size = static_cast<size_t>(code->end - code->start);
1880   // Check bounds for {pc}.
1881   if (pc < code->locals.encoded_size || pc >= size) return false;
1882   // Check if a breakpoint is present at that place in the code.
1883   return code->start[pc] == kInternalBreakpoint;
1884 }
1885 
// Per-function tracing is not implemented yet; this aborts when called.
bool WasmInterpreter::SetTracing(const WasmFunction* function, bool enabled) {
  UNIMPLEMENTED();
  return false;
}

int WasmInterpreter::GetThreadCount() {
  return 1;  // only one thread for now.
}

WasmInterpreter::Thread* WasmInterpreter::GetThread(int id) {
  CHECK_EQ(0, id);  // only one thread for now.
  return ToThread(&internals_->threads_[id]);
}

// Returns the current memory size of the instance, in bytes.
size_t WasmInterpreter::GetMemorySize() {
  return internals_->instance_->mem_size;
}

// Direct memory access via the interpreter is not implemented yet.
WasmVal WasmInterpreter::ReadMemory(size_t offset) {
  UNIMPLEMENTED();
  return WasmVal();
}

void WasmInterpreter::WriteMemory(size_t offset, WasmVal val) {
  UNIMPLEMENTED();
}

// Register {function} in the code map without code; the code can be attached
// later via SetFunctionCodeForTesting.
int WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
  return internals_->codemap_.AddFunction(function, nullptr, nullptr);
}

bool WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
                                                const byte* start,
                                                const byte* end) {
  return internals_->codemap_.SetFunctionCode(function, start, end);
}

// Compute the control-transfer table for the given code range directly,
// bypassing the rest of the interpreter machinery.
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
    Zone* zone, const byte* start, const byte* end) {
  ControlTransfers targets(zone, nullptr, start, end);
  return targets.map_;
}
1928 
1929 //============================================================================
1930 // Implementation of the frame inspection interface.
1931 //============================================================================
// Frame inspection is mostly unimplemented; the accessors below are stubs
// that abort (UNIMPLEMENTED) or return placeholder values.
int InterpretedFrame::GetParameterCount() const {
  USE(fp_);
  USE(sp_);
  // TODO(clemensh): Return the correct number of parameters.
  return 0;
}

WasmVal InterpretedFrame::GetLocalVal(int index) const {
  CHECK_GE(index, 0);
  UNIMPLEMENTED();
  // Unreachable placeholder: a void value.
  WasmVal none;
  none.type = kWasmStmt;
  return none;
}

WasmVal InterpretedFrame::GetExprVal(int pc) const {
  UNIMPLEMENTED();
  // Unreachable placeholder: a void value.
  WasmVal none;
  none.type = kWasmStmt;
  return none;
}

void InterpretedFrame::SetLocalVal(int index, WasmVal val) { UNIMPLEMENTED(); }

void InterpretedFrame::SetExprVal(int pc, WasmVal val) { UNIMPLEMENTED(); }
1957 
1958 }  // namespace wasm
1959 }  // namespace internal
1960 }  // namespace v8
1961