// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/interpreter.h"

#include <fstream>
#include <memory>

#include "src/ast/prettyprinter.h"
#include "src/builtins/builtins-arguments.h"
#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins-object.h"
#include "src/code-factory.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/factory.h"
#include "src/ic/accessor-assembler.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/log.h"
#include "src/objects-inl.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {
namespace interpreter {

using compiler::Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;

#define __ assembler->

class InterpreterCompilationJob final : public CompilationJob {
 public:
  explicit InterpreterCompilationJob(CompilationInfo* info);

 protected:
  Status PrepareJobImpl() final;
  Status ExecuteJobImpl() final;
  Status FinalizeJobImpl() final;

 private:
  class TimerScope final {
   public:
    TimerScope(RuntimeCallStats* stats, RuntimeCallStats::CounterId counter_id)
        : stats_(stats) {
      if (V8_UNLIKELY(FLAG_runtime_stats)) {
        RuntimeCallStats::Enter(stats_, &timer_, counter_id);
      }
    }

    explicit TimerScope(RuntimeCallCounter* counter) : stats_(nullptr) {
      if (V8_UNLIKELY(FLAG_runtime_stats)) {
        timer_.Start(counter, nullptr);
      }
    }

    ~TimerScope() {
      if (V8_UNLIKELY(FLAG_runtime_stats)) {
        if (stats_) {
          RuntimeCallStats::Leave(stats_, &timer_);
        } else {
          timer_.Stop();
        }
      }
    }

   private:
    RuntimeCallStats* stats_;
    RuntimeCallTimer timer_;
  };

  BytecodeGenerator* generator() { return &generator_; }

  BytecodeGenerator generator_;
  RuntimeCallStats* runtime_call_stats_;
  RuntimeCallCounter background_execute_counter_;
  bool print_bytecode_;

  DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
};
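
// Note (not part of the original source): a CompilationJob runs in three
// phases; PrepareJobImpl and FinalizeJobImpl execute on the main isolate
// thread, while ExecuteJobImpl may run on a background thread (see the use of
// executed_on_background_thread() in ExecuteJobImpl below).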

Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
  memset(dispatch_table_, 0, sizeof(dispatch_table_));
}

void Interpreter::Initialize() {
  if (!ShouldInitializeDispatchTable()) return;
  Zone zone(isolate_->allocator(), ZONE_NAME);
  HandleScope scope(isolate_);

  if (FLAG_trace_ignition_dispatches) {
    static const int kBytecodeCount = static_cast<int>(Bytecode::kLast) + 1;
    bytecode_dispatch_counters_table_.reset(
        new uintptr_t[kBytecodeCount * kBytecodeCount]);
    memset(bytecode_dispatch_counters_table_.get(), 0,
           sizeof(uintptr_t) * kBytecodeCount * kBytecodeCount);
  }

  // Generate bytecode handlers for all bytecodes and scales.
  const OperandScale kOperandScales[] = {
#define VALUE(Name, _) OperandScale::k##Name,
      OPERAND_SCALE_LIST(VALUE)
#undef VALUE
  };

  for (OperandScale operand_scale : kOperandScales) {
#define GENERATE_CODE(Name, ...)                                  \
  InstallBytecodeHandler(&zone, Bytecode::k##Name, operand_scale, \
                         &Interpreter::Do##Name);
    BYTECODE_LIST(GENERATE_CODE)
#undef GENERATE_CODE
  }

  // Fill unused entries with the illegal bytecode handler.
  size_t illegal_index =
      GetDispatchTableIndex(Bytecode::kIllegal, OperandScale::kSingle);
  for (size_t index = 0; index < arraysize(dispatch_table_); ++index) {
    if (dispatch_table_[index] == nullptr) {
      dispatch_table_[index] = dispatch_table_[illegal_index];
    }
  }

  // Initialization should have been successful.
  DCHECK(IsDispatchTableInitialized());
}

void Interpreter::InstallBytecodeHandler(Zone* zone, Bytecode bytecode,
                                         OperandScale operand_scale,
                                         BytecodeGeneratorFunc generator) {
  if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;

  InterpreterDispatchDescriptor descriptor(isolate_);
  compiler::CodeAssemblerState state(
      isolate_, zone, descriptor, Code::ComputeFlags(Code::BYTECODE_HANDLER),
      Bytecodes::ToString(bytecode), Bytecodes::ReturnCount(bytecode));
  InterpreterAssembler assembler(&state, bytecode, operand_scale);
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode)) {
    assembler.SaveBytecodeOffset();
  }
  (this->*generator)(&assembler);
  Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
  size_t index = GetDispatchTableIndex(bytecode, operand_scale);
  dispatch_table_[index] = code->entry();
  TraceCodegen(code);
  PROFILE(isolate_, CodeCreateEvent(
                        CodeEventListener::BYTECODE_HANDLER_TAG,
                        AbstractCode::cast(*code),
                        Bytecodes::ToString(bytecode, operand_scale).c_str()));
}

Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
                                      OperandScale operand_scale) {
  DCHECK(IsDispatchTableInitialized());
  DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
  size_t index = GetDispatchTableIndex(bytecode, operand_scale);
  Address code_entry = dispatch_table_[index];
  return Code::GetCodeFromTargetAddress(code_entry);
}

// static
size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
                                          OperandScale operand_scale) {
  static const size_t kEntriesPerOperandScale = 1u << kBitsPerByte;
  size_t index = static_cast<size_t>(bytecode);
  switch (operand_scale) {
    case OperandScale::kSingle:
      return index;
    case OperandScale::kDouble:
      return index + kEntriesPerOperandScale;
    case OperandScale::kQuadruple:
      return index + 2 * kEntriesPerOperandScale;
  }
  UNREACHABLE();
  return 0;
}
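
// Illustration (not part of the original source): with kBitsPerByte == 8,
// each operand scale owns a contiguous 256-entry block of the dispatch table,
// so a handler's slot is its bytecode value plus 0, 256, or 512. For example,
// with b == static_cast<size_t>(Bytecode::kStar):
//
//   GetDispatchTableIndex(Bytecode::kStar, OperandScale::kSingle);     // b
//   GetDispatchTableIndex(Bytecode::kStar, OperandScale::kDouble);     // b + 256
//   GetDispatchTableIndex(Bytecode::kStar, OperandScale::kQuadruple);  // b + 512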

void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
  for (int i = 0; i < kDispatchTableSize; i++) {
    Address code_entry = dispatch_table_[i];
    Object* code = code_entry == nullptr
                       ? nullptr
                       : Code::GetCodeFromTargetAddress(code_entry);
    Object* old_code = code;
    v->VisitPointer(&code);
    if (code != old_code) {
      dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
    }
  }
}

// static
int Interpreter::InterruptBudget() {
  return FLAG_interrupt_budget * kCodeSizeMultiplier;
}

namespace {

bool ShouldPrintBytecode(Handle<SharedFunctionInfo> shared) {
  if (!FLAG_print_bytecode) return false;

  // Check whether the function passes the filter.
  if (shared->is_toplevel()) {
    Vector<const char> filter = CStrVector(FLAG_print_bytecode_filter);
    return (filter.length() == 0) || (filter.length() == 1 && filter[0] == '*');
  } else {
    return shared->PassesFilter(FLAG_print_bytecode_filter);
  }
}
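
// Illustration (not part of the original source): with the d8 flags below,
// bytecode is printed only for functions whose name passes the filter, while
// top-level code matches only an empty filter or "*":
//
//   d8 --print-bytecode --print-bytecode-filter=foo script.js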

}  // namespace

InterpreterCompilationJob::InterpreterCompilationJob(CompilationInfo* info)
    : CompilationJob(info->isolate(), info, "Ignition"),
      generator_(info),
      runtime_call_stats_(info->isolate()->counters()->runtime_call_stats()),
      background_execute_counter_("CompileBackgroundIgnition"),
      print_bytecode_(ShouldPrintBytecode(info->shared_info())) {}

InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
  CodeGenerator::MakeCodePrologue(info(), "interpreter");

  if (print_bytecode_) {
    OFStream os(stdout);
    std::unique_ptr<char[]> name = info()->GetDebugName();
    os << "[generating bytecode for function: " << name.get() << "]"
       << std::endl
       << std::flush;
  }

  return SUCCEEDED;
}

InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
  TimerScope runtimeTimer =
      executed_on_background_thread()
          ? TimerScope(&background_execute_counter_)
          : TimerScope(runtime_call_stats_, &RuntimeCallStats::CompileIgnition);
  // TODO(lpy): add support for background compilation RCS trace.
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition");

  generator()->GenerateBytecode(stack_limit());

  if (generator()->HasStackOverflow()) {
    return FAILED;
  }
  return SUCCEEDED;
}

InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
  // Add background runtime call stats.
  if (V8_UNLIKELY(FLAG_runtime_stats && executed_on_background_thread())) {
    runtime_call_stats_->CompileBackgroundIgnition.Add(
        &background_execute_counter_);
  }

  RuntimeCallTimerScope runtimeTimer(
      runtime_call_stats_, &RuntimeCallStats::CompileIgnitionFinalization);

  Handle<BytecodeArray> bytecodes = generator()->FinalizeBytecode(isolate());
  if (generator()->HasStackOverflow()) {
    return FAILED;
  }

  if (print_bytecode_) {
    OFStream os(stdout);
    bytecodes->Print(os);
    os << std::flush;
  }

  info()->SetBytecodeArray(bytecodes);
  info()->SetCode(info()->isolate()->builtins()->InterpreterEntryTrampoline());
  return SUCCEEDED;
}

CompilationJob* Interpreter::NewCompilationJob(CompilationInfo* info) {
  return new InterpreterCompilationJob(info);
}

bool Interpreter::IsDispatchTableInitialized() {
  return dispatch_table_[0] != nullptr;
}

bool Interpreter::ShouldInitializeDispatchTable() {
  if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
      FLAG_trace_ignition_dispatches) {
    // Regenerate table to add bytecode tracing operations, print the assembly
    // code generated by TurboFan or instrument handlers with dispatch counters.
    return true;
  }
  return !IsDispatchTableInitialized();
}

void Interpreter::TraceCodegen(Handle<Code> code) {
#ifdef ENABLE_DISASSEMBLER
  if (FLAG_trace_ignition_codegen) {
    OFStream os(stdout);
    code->Disassemble(nullptr, os);
    os << std::flush;
  }
#endif  // ENABLE_DISASSEMBLER
}

const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
#ifdef ENABLE_DISASSEMBLER
#define RETURN_NAME(Name, ...)                                 \
  if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \
      code->entry()) {                                         \
    return #Name;                                              \
  }
  BYTECODE_LIST(RETURN_NAME)
#undef RETURN_NAME
#endif  // ENABLE_DISASSEMBLER
  return nullptr;
}

uintptr_t Interpreter::GetDispatchCounter(Bytecode from, Bytecode to) const {
  int from_index = Bytecodes::ToByte(from);
  int to_index = Bytecodes::ToByte(to);
  return bytecode_dispatch_counters_table_[from_index * kNumberOfBytecodes +
                                           to_index];
}

Local<v8::Object> Interpreter::GetDispatchCountersObject() {
  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
  Local<v8::Context> context = isolate->GetCurrentContext();

  Local<v8::Object> counters_map = v8::Object::New(isolate);

  // Output is a JSON-encoded object of objects.
  //
  // The keys of the top-level object are source bytecodes, and the
  // corresponding values are objects. The keys of those inner objects are
  // dispatch destinations, and each associated value is a counter for the
  // corresponding source-destination dispatch chain.
  //
  // Only non-zero counters are written to file, but an entry in the top-level
  // object is always present, even if the value is empty because all counters
  // for that source are zero.
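  //
  // A hypothetical excerpt of such output (not part of the original source),
  // assuming only a handful of dispatches occurred:
  //
  //   {
  //     "LdaZero": {"Star": 7, "Return": 2},
  //     "Star": {}
  //   }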

  for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) {
    Bytecode from_bytecode = Bytecodes::FromByte(from_index);
    Local<v8::Object> counters_row = v8::Object::New(isolate);

    for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) {
      Bytecode to_bytecode = Bytecodes::FromByte(to_index);
      uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode);

      if (counter > 0) {
        std::string to_name = Bytecodes::ToString(to_bytecode);
        Local<v8::String> to_name_object =
            v8::String::NewFromUtf8(isolate, to_name.c_str(),
                                    NewStringType::kNormal)
                .ToLocalChecked();
        Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
        CHECK(counters_row
                  ->DefineOwnProperty(context, to_name_object, counter_object)
                  .IsJust());
      }
    }

    std::string from_name = Bytecodes::ToString(from_bytecode);
    Local<v8::String> from_name_object =
        v8::String::NewFromUtf8(isolate, from_name.c_str(),
                                NewStringType::kNormal)
            .ToLocalChecked();

    CHECK(
        counters_map->DefineOwnProperty(context, from_name_object, counters_row)
            .IsJust());
  }

  return counters_map;
}

// LdaZero
//
// Load literal '0' into the accumulator.
void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
  Node* zero_value = __ NumberConstant(0.0);
  __ SetAccumulator(zero_value);
  __ Dispatch();
}

// LdaSmi <imm>
//
// Load an integer literal into the accumulator as a Smi.
void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
  Node* smi_int = __ BytecodeOperandImmSmi(0);
  __ SetAccumulator(smi_int);
  __ Dispatch();
}

// LdaConstant <idx>
//
// Load constant literal at |idx| in the constant pool into the accumulator.
void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* constant = __ LoadConstantPoolEntry(index);
  __ SetAccumulator(constant);
  __ Dispatch();
}

// LdaUndefined
//
// Load Undefined into the accumulator.
void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  __ SetAccumulator(undefined_value);
  __ Dispatch();
}

// LdaNull
//
// Load Null into the accumulator.
void Interpreter::DoLdaNull(InterpreterAssembler* assembler) {
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  __ SetAccumulator(null_value);
  __ Dispatch();
}

// LdaTheHole
//
// Load TheHole into the accumulator.
void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) {
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  __ SetAccumulator(the_hole_value);
  __ Dispatch();
}

// LdaTrue
//
// Load True into the accumulator.
void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) {
  Node* true_value = __ HeapConstant(isolate_->factory()->true_value());
  __ SetAccumulator(true_value);
  __ Dispatch();
}

// LdaFalse
//
// Load False into the accumulator.
void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) {
  Node* false_value = __ HeapConstant(isolate_->factory()->false_value());
  __ SetAccumulator(false_value);
  __ Dispatch();
}

// Ldar <src>
//
// Load accumulator with value from register <src>.
void Interpreter::DoLdar(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* value = __ LoadRegister(reg_index);
  __ SetAccumulator(value);
  __ Dispatch();
}

// Star <dst>
//
// Store accumulator to register <dst>.
void Interpreter::DoStar(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* accumulator = __ GetAccumulator();
  __ StoreRegister(accumulator, reg_index);
  __ Dispatch();
}

// Mov <src> <dst>
//
// Stores the value of register <src> to register <dst>.
void Interpreter::DoMov(InterpreterAssembler* assembler) {
  Node* src_index = __ BytecodeOperandReg(0);
  Node* src_value = __ LoadRegister(src_index);
  Node* dst_index = __ BytecodeOperandReg(1);
  __ StoreRegister(src_value, dst_index);
  __ Dispatch();
}
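
// Illustration (not part of the original source): for a declaration such as
// `var x = y;` Ignition typically emits an accumulator round trip rather than
// a Mov (the register assignments here are hypothetical):
//
//   Ldar r1  ; load y into the accumulator
//   Star r0  ; store the accumulator into x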

void Interpreter::BuildLoadGlobal(int slot_operand_index,
                                  int name_operand_index,
                                  TypeofMode typeof_mode,
                                  InterpreterAssembler* assembler) {
  // Load the global via the LoadGlobalIC.
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* feedback_slot = __ BytecodeOperandIdx(slot_operand_index);

  AccessorAssembler accessor_asm(assembler->state());

  Label try_handler(assembler, Label::kDeferred),
      miss(assembler, Label::kDeferred);

  // Fast path without frame construction for the data case.
  {
    Label done(assembler);
    Variable var_result(assembler, MachineRepresentation::kTagged);
    ExitPoint exit_point(assembler, &done, &var_result);

    accessor_asm.LoadGlobalIC_TryPropertyCellCase(
        feedback_vector, feedback_slot, &exit_point, &try_handler, &miss,
        CodeStubAssembler::INTPTR_PARAMETERS);

    __ Bind(&done);
    __ SetAccumulator(var_result.value());
    __ Dispatch();
  }

  // Slow path with frame construction.
  {
    Label done(assembler);
    Variable var_result(assembler, MachineRepresentation::kTagged);
    ExitPoint exit_point(assembler, &done, &var_result);

    __ Bind(&try_handler);
    {
      Node* context = __ GetContext();
      Node* smi_slot = __ SmiTag(feedback_slot);
      Node* name_index = __ BytecodeOperandIdx(name_operand_index);
      Node* name = __ LoadConstantPoolEntry(name_index);

      AccessorAssembler::LoadICParameters params(context, nullptr, name,
                                                 smi_slot, feedback_vector);
      accessor_asm.LoadGlobalIC_TryHandlerCase(&params, typeof_mode,
                                               &exit_point, &miss);
    }

    __ Bind(&miss);
    {
      Node* context = __ GetContext();
      Node* smi_slot = __ SmiTag(feedback_slot);
      Node* name_index = __ BytecodeOperandIdx(name_operand_index);
      Node* name = __ LoadConstantPoolEntry(name_index);

      AccessorAssembler::LoadICParameters params(context, nullptr, name,
                                                 smi_slot, feedback_vector);
      accessor_asm.LoadGlobalIC_MissCase(&params, &exit_point);
    }

    __ Bind(&done);
    {
      __ SetAccumulator(var_result.value());
      __ Dispatch();
    }
  }
}

// LdaGlobal <name_index> <slot>
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedbackVector slot <slot> outside of a typeof.
void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
  static const int kNameOperandIndex = 0;
  static const int kSlotOperandIndex = 1;

  BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, NOT_INSIDE_TYPEOF,
                  assembler);
}

// LdaGlobalInsideTypeof <name_index> <slot>
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedbackVector slot <slot> inside of a typeof.
void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
  static const int kNameOperandIndex = 0;
  static const int kSlotOperandIndex = 1;

  BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, INSIDE_TYPEOF,
                  assembler);
}

void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
  // Get the global object.
  Node* context = __ GetContext();
  Node* native_context = __ LoadNativeContext(context);
  Node* global =
      __ LoadContextElement(native_context, Context::EXTENSION_INDEX);

  // Store the global via the StoreIC.
  Node* code_target = __ HeapConstant(ic.code());
  Node* constant_index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* feedback_vector = __ LoadFeedbackVector();
  __ CallStub(ic.descriptor(), code_target, context, global, name, value,
              smi_slot, feedback_vector);
  __ Dispatch();
}

// StaGlobalSloppy <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using FeedbackVector slot <slot> in sloppy mode.
void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStaGlobal(ic, assembler);
}

// StaGlobalStrict <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant
// pool entry <name_index> using FeedbackVector slot <slot> in strict mode.
void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStaGlobal(ic, assembler);
}

// LdaContextSlot <context> <slot_index> <depth>
//
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* depth = __ BytecodeOperandUImm(2);
  Node* slot_context = __ GetContextAtDepth(context, depth);
  Node* result = __ LoadContextElement(slot_context, slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaImmutableContextSlot <context> <slot_index> <depth>
//
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
void Interpreter::DoLdaImmutableContextSlot(InterpreterAssembler* assembler) {
  // TODO(danno): Share the actual code object rather than creating a
  // duplicate one.
  DoLdaContextSlot(assembler);
}

// LdaCurrentContextSlot <slot_index>
//
// Load the object in |slot_index| of the current context into the accumulator.
void Interpreter::DoLdaCurrentContextSlot(InterpreterAssembler* assembler) {
  Node* slot_index = __ BytecodeOperandIdx(0);
  Node* slot_context = __ GetContext();
  Node* result = __ LoadContextElement(slot_context, slot_index);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaImmutableCurrentContextSlot <slot_index>
//
// Load the object in |slot_index| of the current context into the accumulator.
void Interpreter::DoLdaImmutableCurrentContextSlot(
    InterpreterAssembler* assembler) {
  // TODO(danno): Share the actual code object rather than creating a
  // duplicate one.
  DoLdaCurrentContextSlot(assembler);
}

// StaContextSlot <context> <slot_index> <depth>
//
// Stores the object in the accumulator into |slot_index| of the context at
// |depth| in the context chain starting at |context|.
void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* depth = __ BytecodeOperandUImm(2);
  Node* slot_context = __ GetContextAtDepth(context, depth);
  __ StoreContextElement(slot_context, slot_index, value);
  __ Dispatch();
}

// StaCurrentContextSlot <slot_index>
//
// Stores the object in the accumulator into |slot_index| of the current
// context.
void Interpreter::DoStaCurrentContextSlot(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* slot_index = __ BytecodeOperandIdx(0);
  Node* slot_context = __ GetContext();
  __ StoreContextElement(slot_context, slot_index, value);
  __ Dispatch();
}

void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id,
                                  InterpreterAssembler* assembler) {
  Node* name_index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(name_index);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(function_id, context, name);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaLookupSlot <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
  DoLdaLookupSlot(Runtime::kLoadLookupSlot, assembler);
}

// LdaLookupSlotInsideTypeof <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
  DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}

void Interpreter::DoLdaLookupContextSlot(Runtime::FunctionId function_id,
                                         InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  Node* name_index = __ BytecodeOperandIdx(0);
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* depth = __ BytecodeOperandUImm(2);

  Label slowpath(assembler, Label::kDeferred);

  // Check for context extensions to allow the fast path.
  __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);

  // Fast path does a normal load context.
  {
    Node* slot_context = __ GetContextAtDepth(context, depth);
    Node* result = __ LoadContextElement(slot_context, slot_index);
    __ SetAccumulator(result);
    __ Dispatch();
  }

  // Slow path when we have to call out to the runtime.
  __ Bind(&slowpath);
  {
    Node* name = __ LoadConstantPoolEntry(name_index);
    Node* result = __ CallRuntime(function_id, context, name);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

// LdaLookupContextSlot <name_index> <slot_index> <depth>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
void Interpreter::DoLdaLookupContextSlot(InterpreterAssembler* assembler) {
  DoLdaLookupContextSlot(Runtime::kLoadLookupSlot, assembler);
}

// LdaLookupContextSlotInsideTypeof <name_index> <slot_index> <depth>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
void Interpreter::DoLdaLookupContextSlotInsideTypeof(
    InterpreterAssembler* assembler) {
  DoLdaLookupContextSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}

void Interpreter::DoLdaLookupGlobalSlot(Runtime::FunctionId function_id,
                                        InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  Node* depth = __ BytecodeOperandUImm(2);

  Label slowpath(assembler, Label::kDeferred);

  // Check for context extensions to allow the fast path.
  __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);

  // Fast path does a normal load global.
  {
    static const int kNameOperandIndex = 0;
    static const int kSlotOperandIndex = 1;

    TypeofMode typeof_mode = function_id == Runtime::kLoadLookupSlotInsideTypeof
                                 ? INSIDE_TYPEOF
                                 : NOT_INSIDE_TYPEOF;

    BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, typeof_mode,
                    assembler);
  }

  // Slow path when we have to call out to the runtime.
  __ Bind(&slowpath);
  {
    Node* name_index = __ BytecodeOperandIdx(0);
    Node* name = __ LoadConstantPoolEntry(name_index);
    Node* result = __ CallRuntime(function_id, context, name);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

// LdaLookupGlobalSlot <name_index> <feedback_slot> <depth>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
void Interpreter::DoLdaLookupGlobalSlot(InterpreterAssembler* assembler) {
  DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlot, assembler);
}

// LdaLookupGlobalSlotInsideTypeof <name_index> <feedback_slot> <depth>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
void Interpreter::DoLdaLookupGlobalSlotInsideTypeof(
    InterpreterAssembler* assembler) {
  DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}

void Interpreter::DoStaLookupSlot(LanguageMode language_mode,
                                  InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* name = __ LoadConstantPoolEntry(index);
  Node* context = __ GetContext();
  Node* result = __ CallRuntime(is_strict(language_mode)
                                    ? Runtime::kStoreLookupSlot_Strict
                                    : Runtime::kStoreLookupSlot_Sloppy,
                                context, name, value);
  __ SetAccumulator(result);
  __ Dispatch();
}

// StaLookupSlotSloppy <name_index>
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in sloppy mode.
void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
  DoStaLookupSlot(LanguageMode::SLOPPY, assembler);
}

// StaLookupSlotStrict <name_index>
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in strict mode.
void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
  DoStaLookupSlot(LanguageMode::STRICT, assembler);
}

// LdaNamedProperty <object> <name_index> <slot>
//
// Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
// constant pool entry <name_index>.
void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
  Node* code_target = __ HeapConstant(ic.code());
  Node* register_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(register_index);
  Node* constant_index = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* context = __ GetContext();
  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
                             name, smi_slot, feedback_vector);
  __ SetAccumulator(result);
  __ Dispatch();
}

// LdaKeyedProperty <object> <slot>
//
// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
// in the accumulator.
void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
  Node* code_target = __ HeapConstant(ic.code());
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(reg_index);
  Node* name = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(1);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* context = __ GetContext();
  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
                             name, smi_slot, feedback_vector);
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* object_reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(object_reg_index);
  Node* constant_index = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(constant_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* context = __ GetContext();
  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
              smi_slot, feedback_vector);
  __ Dispatch();
}

// StaNamedPropertySloppy <object> <name_index> <slot>
//
// Calls the sloppy mode StoreIC at FeedbackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
void Interpreter::DoStaNamedPropertySloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
  DoStoreIC(ic, assembler);
}

// StaNamedPropertyStrict <object> <name_index> <slot>
//
// Calls the strict mode StoreIC at FeedbackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
  DoStoreIC(ic, assembler);
}

// StaNamedOwnProperty <object> <name_index> <slot>
//
// Calls the StoreOwnIC at FeedbackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
void Interpreter::DoStaNamedOwnProperty(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::StoreOwnICInOptimizedCode(isolate_);
  DoStoreIC(ic, assembler);
}

void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
  Node* code_target = __ HeapConstant(ic.code());
  Node* object_reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(object_reg_index);
  Node* name_reg_index = __ BytecodeOperandReg(1);
  Node* name = __ LoadRegister(name_reg_index);
  Node* value = __ GetAccumulator();
  Node* raw_slot = __ BytecodeOperandIdx(2);
  Node* smi_slot = __ SmiTag(raw_slot);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* context = __ GetContext();
  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
              smi_slot, feedback_vector);
  __ Dispatch();
}

// StaKeyedPropertySloppy <object> <key> <slot>
//
// Calls the sloppy mode KeyedStoreIC at FeedbackVector slot <slot> for
// <object> and the key <key> with the value in the accumulator.
void Interpreter::DoStaKeyedPropertySloppy(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY);
  DoKeyedStoreIC(ic, assembler);
}

// StaKeyedPropertyStrict <object> <key> <slot>
//
// Calls the strict mode KeyedStoreIC at FeedbackVector slot <slot> for
// <object> and the key <key> with the value in the accumulator.
void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT);
  DoKeyedStoreIC(ic, assembler);
}

// StaDataPropertyInLiteral <object> <name> <flags>
//
// Define a property <name> with value from the accumulator in <object>.
// Property attributes and whether set_function_name are stored in
// DataPropertyInLiteralFlags <flags>.
//
// This definition is not observable and is used only for definitions
// in object or class literals.
void Interpreter::DoStaDataPropertyInLiteral(InterpreterAssembler* assembler) {
  Node* object = __ LoadRegister(__ BytecodeOperandReg(0));
  Node* name = __ LoadRegister(__ BytecodeOperandReg(1));
  Node* value = __ GetAccumulator();
  Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2));
  Node* vector_index = __ SmiTag(__ BytecodeOperandIdx(3));

  Node* feedback_vector = __ LoadFeedbackVector();
  Node* context = __ GetContext();

  __ CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name,
                 value, flags, feedback_vector, vector_index);
  __ Dispatch();
}

// LdaModuleVariable <cell_index> <depth>
//
// Load the contents of a module variable into the accumulator.  The variable
// is identified by <cell_index>.  <depth> is the depth of the current context
// relative to the module context.
void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) {
  Node* cell_index = __ BytecodeOperandImmIntPtr(0);
  Node* depth = __ BytecodeOperandUImm(1);

  Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
  Node* module =
      __ LoadContextElement(module_context, Context::EXTENSION_INDEX);

  Label if_export(assembler), if_import(assembler), end(assembler);
  __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
            &if_import);

  __ Bind(&if_export);
  {
    Node* regular_exports =
        __ LoadObjectField(module, Module::kRegularExportsOffset);
    // The actual array index is (cell_index - 1).
    Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
    Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
    __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
    __ Goto(&end);
  }

  __ Bind(&if_import);
  {
    Node* regular_imports =
        __ LoadObjectField(module, Module::kRegularImportsOffset);
    // The actual array index is (-cell_index - 1).
    Node* import_index = __ IntPtrSub(__ IntPtrConstant(-1), cell_index);
    Node* cell = __ LoadFixedArrayElement(regular_imports, import_index);
    __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
    __ Goto(&end);
  }

  __ Bind(&end);
  __ Dispatch();
}
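
// Illustration (not part of the original source): cell_index selects both the
// table and the slot. For example, cell_index == 2 reads
// regular_exports[2 - 1] == regular_exports[1], while cell_index == -1 reads
// regular_imports[-(-1) - 1] == regular_imports[0].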

// StaModuleVariable <cell_index> <depth>
//
// Store accumulator to the module variable identified by <cell_index>.
// <depth> is the depth of the current context relative to the module context.
void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) {
  Node* value = __ GetAccumulator();
  Node* cell_index = __ BytecodeOperandImmIntPtr(0);
  Node* depth = __ BytecodeOperandUImm(1);

  Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
  Node* module =
      __ LoadContextElement(module_context, Context::EXTENSION_INDEX);

  Label if_export(assembler), if_import(assembler), end(assembler);
  __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
            &if_import);

  __ Bind(&if_export);
  {
    Node* regular_exports =
        __ LoadObjectField(module, Module::kRegularExportsOffset);
    // The actual array index is (cell_index - 1).
    Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
    Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
    __ StoreObjectField(cell, Cell::kValueOffset, value);
    __ Goto(&end);
  }

  __ Bind(&if_import);
  {
    // Not supported (probably never).
    __ Abort(kUnsupportedModuleOperation);
    __ Goto(&end);
  }

  __ Bind(&end);
  __ Dispatch();
}

// PushContext <context>
//
// Saves the current context in <context>, and pushes the accumulator as the
// new current context.
void Interpreter::DoPushContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* new_context = __ GetAccumulator();
  Node* old_context = __ GetContext();
  __ StoreRegister(old_context, reg_index);
  __ SetContext(new_context);
  __ Dispatch();
}

// PopContext <context>
//
// Pops the current context and sets <context> as the new context.
void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* context = __ LoadRegister(reg_index);
  __ SetContext(context);
  __ Dispatch();
}

// TODO(mythria): Remove this function once all CompareOps record type
// feedback.
void Interpreter::DoCompareOp(Token::Value compare_op,
                              InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* result;
  switch (compare_op) {
    case Token::IN:
      result = assembler->HasProperty(rhs, lhs, context);
      break;
    case Token::INSTANCEOF:
      result = assembler->InstanceOf(lhs, rhs, context);
      break;
    default:
      UNREACHABLE();
  }
  __ SetAccumulator(result);
  __ Dispatch();
}

template <class Generator>
void Interpreter::DoBinaryOpWithFeedback(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* feedback_vector = __ LoadFeedbackVector();
  Node* result = Generator::Generate(assembler, lhs, rhs, slot_index,
                                     feedback_vector, context);
  __ SetAccumulator(result);
  __ Dispatch();
}

void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
                                          InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* lhs = __ LoadRegister(reg_index);
  Node* rhs = __ GetAccumulator();
  Node* context = __ GetContext();
  Node* slot_index = __ BytecodeOperandIdx(1);
  Node* feedback_vector = __ LoadFeedbackVector();

  // TODO(interpreter): the only reason this check is here is because we
  // sometimes emit comparisons that shouldn't collect feedback (e.g.
  // try-finally blocks and generators), and we could get rid of this by
  // introducing Smi equality tests.
  Label gather_type_feedback(assembler), do_compare(assembler);
  __ Branch(__ WordEqual(slot_index, __ IntPtrConstant(0)), &do_compare,
            &gather_type_feedback);

  __ Bind(&gather_type_feedback);
  {
    Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
    Label lhs_is_not_smi(assembler), lhs_is_not_number(assembler),
        lhs_is_not_string(assembler), gather_rhs_type(assembler),
        update_feedback(assembler);

    __ GotoIfNot(__ TaggedIsSmi(lhs), &lhs_is_not_smi);

    var_type_feedback.Bind(
        __ SmiConstant(CompareOperationFeedback::kSignedSmall));
    __ Goto(&gather_rhs_type);

    __ Bind(&lhs_is_not_smi);
    {
      Node* lhs_map = __ LoadMap(lhs);
      __ GotoIfNot(__ IsHeapNumberMap(lhs_map), &lhs_is_not_number);

      var_type_feedback.Bind(__ SmiConstant(CompareOperationFeedback::kNumber));
      __ Goto(&gather_rhs_type);

      __ Bind(&lhs_is_not_number);
      {
        Node* lhs_instance_type = __ LoadInstanceType(lhs);
        if (Token::IsOrderedRelationalCompareOp(compare_op)) {
          Label lhs_is_not_oddball(assembler);
          __ GotoIfNot(
              __ Word32Equal(lhs_instance_type, __ Int32Constant(ODDBALL_TYPE)),
              &lhs_is_not_oddball);

          var_type_feedback.Bind(
              __ SmiConstant(CompareOperationFeedback::kNumberOrOddball));
          __ Goto(&gather_rhs_type);

          __ Bind(&lhs_is_not_oddball);
        }

        Label lhs_is_not_string(assembler);
        __ GotoIfNot(__ IsStringInstanceType(lhs_instance_type),
                     &lhs_is_not_string);

        if (Token::IsOrderedRelationalCompareOp(compare_op)) {
          var_type_feedback.Bind(
              __ SmiConstant(CompareOperationFeedback::kString));
        } else {
          var_type_feedback.Bind(__ SelectSmiConstant(
              __ Word32Equal(
                  __ Word32And(lhs_instance_type,
                               __ Int32Constant(kIsNotInternalizedMask)),
                  __ Int32Constant(kInternalizedTag)),
              CompareOperationFeedback::kInternalizedString,
              CompareOperationFeedback::kString));
        }
        __ Goto(&gather_rhs_type);

        __ Bind(&lhs_is_not_string);
        if (Token::IsEqualityOp(compare_op)) {
          var_type_feedback.Bind(__ SelectSmiConstant(
              __ IsJSReceiverInstanceType(lhs_instance_type),
              CompareOperationFeedback::kReceiver,
              CompareOperationFeedback::kAny));
        } else {
          var_type_feedback.Bind(
              __ SmiConstant(CompareOperationFeedback::kAny));
        }
        __ Goto(&gather_rhs_type);
      }
    }

    __ Bind(&gather_rhs_type);
    {
      Label rhs_is_not_smi(assembler), rhs_is_not_number(assembler);

      __ GotoIfNot(__ TaggedIsSmi(rhs), &rhs_is_not_smi);

      var_type_feedback.Bind(
          __ SmiOr(var_type_feedback.value(),
                   __ SmiConstant(CompareOperationFeedback::kSignedSmall)));
      __ Goto(&update_feedback);

      __ Bind(&rhs_is_not_smi);
      {
        Node* rhs_map = __ LoadMap(rhs);
        __ GotoIfNot(__ IsHeapNumberMap(rhs_map), &rhs_is_not_number);

        var_type_feedback.Bind(
            __ SmiOr(var_type_feedback.value(),
                     __ SmiConstant(CompareOperationFeedback::kNumber)));
        __ Goto(&update_feedback);

        __ Bind(&rhs_is_not_number);
        {
          Node* rhs_instance_type = __ LoadInstanceType(rhs);
          if (Token::IsOrderedRelationalCompareOp(compare_op)) {
            Label rhs_is_not_oddball(assembler);
            __ GotoIfNot(__ Word32Equal(rhs_instance_type,
                                        __ Int32Constant(ODDBALL_TYPE)),
                         &rhs_is_not_oddball);

            var_type_feedback.Bind(__ SmiOr(
                var_type_feedback.value(),
                __ SmiConstant(CompareOperationFeedback::kNumberOrOddball)));
            __ Goto(&update_feedback);

            __ Bind(&rhs_is_not_oddball);
          }

          Label rhs_is_not_string(assembler);
          __ GotoIfNot(__ IsStringInstanceType(rhs_instance_type),
                       &rhs_is_not_string);

          if (Token::IsOrderedRelationalCompareOp(compare_op)) {
            var_type_feedback.Bind(
                __ SmiOr(var_type_feedback.value(),
                         __ SmiConstant(CompareOperationFeedback::kString)));
          } else {
            var_type_feedback.Bind(__ SmiOr(
                var_type_feedback.value(),
                __ SelectSmiConstant(
                    __ Word32Equal(
                        __ Word32And(rhs_instance_type,
                                     __ Int32Constant(kIsNotInternalizedMask)),
                        __ Int32Constant(kInternalizedTag)),
                    CompareOperationFeedback::kInternalizedString,
                    CompareOperationFeedback::kString)));
          }
          __ Goto(&update_feedback);

          __ Bind(&rhs_is_not_string);
          if (Token::IsEqualityOp(compare_op)) {
            var_type_feedback.Bind(
                __ SmiOr(var_type_feedback.value(),
                         __ SelectSmiConstant(
                             __ IsJSReceiverInstanceType(rhs_instance_type),
                             CompareOperationFeedback::kReceiver,
                             CompareOperationFeedback::kAny)));
          } else {
            var_type_feedback.Bind(
                __ SmiConstant(CompareOperationFeedback::kAny));
          }
          __ Goto(&update_feedback);
        }
      }
    }

    __ Bind(&update_feedback);
    {
      __ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
      __ Goto(&do_compare);
    }
  }

  __ Bind(&do_compare);
  Node* result;
  switch (compare_op) {
    case Token::EQ:
      result = assembler->Equal(CodeStubAssembler::kDontNegateResult, lhs, rhs,
                                context);
      break;
    case Token::NE:
      result =
          assembler->Equal(CodeStubAssembler::kNegateResult, lhs, rhs, context);
      break;
    case Token::EQ_STRICT:
      result = assembler->StrictEqual(CodeStubAssembler::kDontNegateResult, lhs,
                                      rhs, context);
      break;
    case Token::LT:
      result = assembler->RelationalComparison(CodeStubAssembler::kLessThan,
                                               lhs, rhs, context);
      break;
    case Token::GT:
      result = assembler->RelationalComparison(CodeStubAssembler::kGreaterThan,
                                               lhs, rhs, context);
      break;
    case Token::LTE:
      result = assembler->RelationalComparison(
          CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context);
      break;
    case Token::GTE:
      result = assembler->RelationalComparison(
          CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context);
      break;
    default:
      UNREACHABLE();
  }
  __ SetAccumulator(result);
  __ Dispatch();
}
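
// Note (not part of the original source): the CompareOperationFeedback
// constants form a lattice, so SmiOr-combining the lhs and rhs kinds above
// simply widens the recorded feedback. For example, comparing a Smi against a
// HeapNumber records kSignedSmall | kNumber, which collapses to kNumber.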
1333 
1334 // Add <src>
1335 //
1336 // Add register <src> to accumulator.
DoAdd(InterpreterAssembler * assembler)1337 void Interpreter::DoAdd(InterpreterAssembler* assembler) {
1338   DoBinaryOpWithFeedback<AddWithFeedbackStub>(assembler);
1339 }
1340 
1341 // Sub <src>
1342 //
1343 // Subtract register <src> from accumulator.
1344 void Interpreter::DoSub(InterpreterAssembler* assembler) {
1345   DoBinaryOpWithFeedback<SubtractWithFeedbackStub>(assembler);
1346 }
1347 
1348 // Mul <src>
1349 //
1350 // Multiply accumulator by register <src>.
1351 void Interpreter::DoMul(InterpreterAssembler* assembler) {
1352   DoBinaryOpWithFeedback<MultiplyWithFeedbackStub>(assembler);
1353 }
1354 
1355 // Div <src>
1356 //
1357 // Divide register <src> by accumulator.
1358 void Interpreter::DoDiv(InterpreterAssembler* assembler) {
1359   DoBinaryOpWithFeedback<DivideWithFeedbackStub>(assembler);
1360 }
1361 
1362 // Mod <src>
1363 //
1364 // Modulo register <src> by accumulator.
1365 void Interpreter::DoMod(InterpreterAssembler* assembler) {
1366   DoBinaryOpWithFeedback<ModulusWithFeedbackStub>(assembler);
1367 }
1368 
1369 void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
1370                                     InterpreterAssembler* assembler) {
1371   Node* reg_index = __ BytecodeOperandReg(0);
1372   Node* lhs = __ LoadRegister(reg_index);
1373   Node* rhs = __ GetAccumulator();
1374   Node* context = __ GetContext();
1375   Node* slot_index = __ BytecodeOperandIdx(1);
1376   Node* feedback_vector = __ LoadFeedbackVector();
1377 
1378   Variable var_lhs_type_feedback(assembler,
1379                                  MachineRepresentation::kTaggedSigned),
1380       var_rhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
1381   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
1382       context, lhs, &var_lhs_type_feedback);
1383   Node* rhs_value = __ TruncateTaggedToWord32WithFeedback(
1384       context, rhs, &var_rhs_type_feedback);
1385   Node* result = nullptr;
1386 
1387   switch (bitwise_op) {
1388     case Token::BIT_OR: {
1389       Node* value = __ Word32Or(lhs_value, rhs_value);
1390       result = __ ChangeInt32ToTagged(value);
1391     } break;
1392     case Token::BIT_AND: {
1393       Node* value = __ Word32And(lhs_value, rhs_value);
1394       result = __ ChangeInt32ToTagged(value);
1395     } break;
1396     case Token::BIT_XOR: {
1397       Node* value = __ Word32Xor(lhs_value, rhs_value);
1398       result = __ ChangeInt32ToTagged(value);
1399     } break;
1400     case Token::SHL: {
1401       Node* value = __ Word32Shl(
1402           lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
1403       result = __ ChangeInt32ToTagged(value);
1404     } break;
1405     case Token::SHR: {
1406       Node* value = __ Word32Shr(
1407           lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
1408       result = __ ChangeUint32ToTagged(value);
1409     } break;
1410     case Token::SAR: {
1411       Node* value = __ Word32Sar(
1412           lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
1413       result = __ ChangeInt32ToTagged(value);
1414     } break;
1415     default:
1416       UNREACHABLE();
1417   }
1418 
1419   Node* result_type = __ SelectSmiConstant(
1420       __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
1421       BinaryOperationFeedback::kNumber);
1422 
1423   if (FLAG_debug_code) {
1424     Label ok(assembler);
1425     __ GotoIf(__ TaggedIsSmi(result), &ok);
1426     Node* result_map = __ LoadMap(result);
1427     __ AbortIfWordNotEqual(result_map, __ HeapNumberMapConstant(),
1428                            kExpectedHeapNumber);
1429     __ Goto(&ok);
1430     __ Bind(&ok);
1431   }
1432 
1433   Node* input_feedback =
1434       __ SmiOr(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
1435   __ UpdateFeedback(__ SmiOr(result_type, input_feedback), feedback_vector,
1436                     slot_index);
1437   __ SetAccumulator(result);
1438   __ Dispatch();
1439 }
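
// Illustrative aside (editorial, hedged): a standalone model of the JS shift
// semantics implemented above. The shift count is masked to its five
// least-significant bits, and only SHR (>>>) produces an unsigned result,
// which is why that case alone uses ChangeUint32ToTagged. Helper names are
// hypothetical; int32_t/uint32_t are assumed from the existing includes.
namespace shift_sketch {
constexpr int32_t ShlJS(int32_t lhs, uint32_t count) {
  // Shift in the unsigned domain so the operation is well-defined for
  // negative lhs, then reinterpret the low 32 bits as a signed value.
  return static_cast<int32_t>(static_cast<uint32_t>(lhs) << (count & 0x1f));
}
constexpr uint32_t ShrJS(int32_t lhs, uint32_t count) {
  return static_cast<uint32_t>(lhs) >> (count & 0x1f);
}
// In JS, (1 << 33) === 2 because 33 & 0x1f == 1.
static_assert(ShlJS(1, 33) == 2, "shift count is masked to five bits");
// In JS, (-1 >>> 0) === 4294967295: the logical shift zero-fills.
static_assert(ShrJS(-1, 0) == 0xffffffffu, "SHR is zero-filling");
}  // namespace shift_sketch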
1440 
1441 // BitwiseOr <src>
1442 //
1443 // BitwiseOr register <src> with the accumulator.
1444 void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
1445   DoBitwiseBinaryOp(Token::BIT_OR, assembler);
1446 }
1447 
1448 // BitwiseXor <src>
1449 //
1450 // BitwiseXor register <src> with the accumulator.
1451 void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
1452   DoBitwiseBinaryOp(Token::BIT_XOR, assembler);
1453 }
1454 
1455 // BitwiseAnd <src>
1456 //
1457 // BitwiseAnd register <src> with the accumulator.
1458 void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
1459   DoBitwiseBinaryOp(Token::BIT_AND, assembler);
1460 }
1461 
1462 // ShiftLeft <src>
1463 //
1464 // Left shifts register <src> by the count specified in the accumulator.
1465 // Register <src> is converted to an int32 and the accumulator to uint32
1466 // before the operation. The 5 least-significant bits of the accumulator
1467 // are used as the count, i.e. <src> << (accumulator & 0x1F).
1468 void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
1469   DoBitwiseBinaryOp(Token::SHL, assembler);
1470 }
1471 
1472 // ShiftRight <src>
1473 //
1474 // Right shifts register <src> by the count specified in the accumulator.
1475 // Result is sign extended. Register <src> is converted to an int32 and the
1476 // accumulator to uint32 before the operation. The 5 least-significant bits
1477 // of the accumulator are used as the count, i.e. <src> >> (accumulator & 0x1F).
1478 void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
1479   DoBitwiseBinaryOp(Token::SAR, assembler);
1480 }
1481 
1482 // ShiftRightLogical <src>
1483 //
1484 // Right shifts register <src> by the count specified in the accumulator.
1485 // Result is zero-filled. The accumulator and register <src> are converted to
1486 // uint32 before the operation. The 5 least-significant bits of the
1487 // accumulator are used as the count, i.e. <src> >>> (accumulator & 0x1F).
1488 void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
1489   DoBitwiseBinaryOp(Token::SHR, assembler);
1490 }
1491 
1492 // AddSmi <imm> <reg>
1493 //
1494 // Adds an immediate value <imm> to register <reg>. For this
1495 // operation <reg> is the lhs operand and <imm> is the rhs operand.
1496 void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
1497   Variable var_result(assembler, MachineRepresentation::kTagged);
1498   Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
1499       end(assembler);
1500 
1501   Node* reg_index = __ BytecodeOperandReg(1);
1502   Node* left = __ LoadRegister(reg_index);
1503   Node* right = __ BytecodeOperandImmSmi(0);
1504   Node* slot_index = __ BytecodeOperandIdx(2);
1505   Node* feedback_vector = __ LoadFeedbackVector();
1506 
1507   // {right} is known to be a Smi.
1508   // Check if {left} is also a Smi and, if so, take the fast path.
1509   __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
1510   __ Bind(&fastpath);
1511   {
1512     // Try fast Smi addition first.
1513     Node* pair = __ IntPtrAddWithOverflow(__ BitcastTaggedToWord(left),
1514                                           __ BitcastTaggedToWord(right));
1515     Node* overflow = __ Projection(1, pair);
1516 
1517     // Check if the Smi addition overflowed.
1518     Label if_notoverflow(assembler);
1519     __ Branch(overflow, &slowpath, &if_notoverflow);
1520     __ Bind(&if_notoverflow);
1521     {
1522       __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall),
1523                         feedback_vector, slot_index);
1524       var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
1525       __ Goto(&end);
1526     }
1527   }
1528   __ Bind(&slowpath);
1529   {
1530     Node* context = __ GetContext();
1531     AddWithFeedbackStub stub(__ isolate());
1532     Callable callable =
1533         Callable(stub.GetCode(), AddWithFeedbackStub::Descriptor(__ isolate()));
1534     var_result.Bind(__ CallStub(callable, context, left, right,
1535                                 __ TruncateWordToWord32(slot_index),
1536                                 feedback_vector));
1537     __ Goto(&end);
1538   }
1539   __ Bind(&end);
1540   {
1541     __ SetAccumulator(var_result.value());
1542     __ Dispatch();
1543   }
1544 }
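
// Illustrative aside (editorial, hedged): the fast path above exploits Smi
// tagging. A Smi keeps its value shifted left of a zero tag bit, so adding
// two tagged Smis as raw machine words produces the correctly tagged sum,
// and signed overflow is the only failure to guard against. A sketch of the
// same idea, assuming GCC/Clang's __builtin_add_overflow (not V8 code):
namespace smi_add_sketch {
inline bool TryAddTaggedSmis(intptr_t lhs_tagged, intptr_t rhs_tagged,
                             intptr_t* result_tagged) {
  // Mirrors BitcastTaggedToWord + IntPtrAddWithOverflow; on overflow the
  // interpreter falls back to the AddWithFeedbackStub slow path instead.
  return !__builtin_add_overflow(lhs_tagged, rhs_tagged, result_tagged);
}
}  // namespace smi_add_sketch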
1545 
1546 // SubSmi <imm> <reg>
1547 //
1548 // Subtracts an immediate value <imm> from register <reg>. For this
1549 // operation <reg> is the lhs operand and <imm> is the rhs operand.
1550 void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
1551   Variable var_result(assembler, MachineRepresentation::kTagged);
1552   Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
1553       end(assembler);
1554 
1555   Node* reg_index = __ BytecodeOperandReg(1);
1556   Node* left = __ LoadRegister(reg_index);
1557   Node* right = __ BytecodeOperandImmSmi(0);
1558   Node* slot_index = __ BytecodeOperandIdx(2);
1559   Node* feedback_vector = __ LoadFeedbackVector();
1560 
1561   // {right} is known to be a Smi.
1562   // Check if {left} is also a Smi and, if so, take the fast path.
1563   __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
1564   __ Bind(&fastpath);
1565   {
1566     // Try fast Smi subtraction first.
1567     Node* pair = __ IntPtrSubWithOverflow(__ BitcastTaggedToWord(left),
1568                                           __ BitcastTaggedToWord(right));
1569     Node* overflow = __ Projection(1, pair);
1570 
1571     // Check if the Smi subtraction overflowed.
1572     Label if_notoverflow(assembler);
1573     __ Branch(overflow, &slowpath, &if_notoverflow);
1574     __ Bind(&if_notoverflow);
1575     {
1576       __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall),
1577                         feedback_vector, slot_index);
1578       var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
1579       __ Goto(&end);
1580     }
1581   }
1582   __ Bind(&slowpath);
1583   {
1584     Node* context = __ GetContext();
1585     SubtractWithFeedbackStub stub(__ isolate());
1586     Callable callable = Callable(
1587         stub.GetCode(), SubtractWithFeedbackStub::Descriptor(__ isolate()));
1588     var_result.Bind(__ CallStub(callable, context, left, right,
1589                                 __ TruncateWordToWord32(slot_index),
1590                                 feedback_vector));
1591     __ Goto(&end);
1592   }
1593   __ Bind(&end);
1594   {
1595     __ SetAccumulator(var_result.value());
1596     __ Dispatch();
1597   }
1598 }
1599 
1600 // BitwiseOr <imm> <reg>
1601 //
1602 // BitwiseOr <reg> with <imm>. For this operation <reg> is the lhs
1603 // operand and <imm> is the rhs operand.
1604 void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
1605   Node* reg_index = __ BytecodeOperandReg(1);
1606   Node* left = __ LoadRegister(reg_index);
1607   Node* right = __ BytecodeOperandImmSmi(0);
1608   Node* context = __ GetContext();
1609   Node* slot_index = __ BytecodeOperandIdx(2);
1610   Node* feedback_vector = __ LoadFeedbackVector();
1611   Variable var_lhs_type_feedback(assembler,
1612                                  MachineRepresentation::kTaggedSigned);
1613   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
1614       context, left, &var_lhs_type_feedback);
1615   Node* rhs_value = __ SmiToWord32(right);
1616   Node* value = __ Word32Or(lhs_value, rhs_value);
1617   Node* result = __ ChangeInt32ToTagged(value);
1618   Node* result_type = __ SelectSmiConstant(
1619       __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
1620       BinaryOperationFeedback::kNumber);
1621   __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
1622                     feedback_vector, slot_index);
1623   __ SetAccumulator(result);
1624   __ Dispatch();
1625 }
1626 
1627 // BitwiseAnd <imm> <reg>
1628 //
1629 // BitwiseAnd <reg> with <imm>. For this operation <reg> is the lhs
1630 // operand and <imm> is the rhs operand.
1631 void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
1632   Node* reg_index = __ BytecodeOperandReg(1);
1633   Node* left = __ LoadRegister(reg_index);
1634   Node* right = __ BytecodeOperandImmSmi(0);
1635   Node* context = __ GetContext();
1636   Node* slot_index = __ BytecodeOperandIdx(2);
1637   Node* feedback_vector = __ LoadFeedbackVector();
1638   Variable var_lhs_type_feedback(assembler,
1639                                  MachineRepresentation::kTaggedSigned);
1640   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
1641       context, left, &var_lhs_type_feedback);
1642   Node* rhs_value = __ SmiToWord32(right);
1643   Node* value = __ Word32And(lhs_value, rhs_value);
1644   Node* result = __ ChangeInt32ToTagged(value);
1645   Node* result_type = __ SelectSmiConstant(
1646       __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
1647       BinaryOperationFeedback::kNumber);
1648   __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
1649                     feedback_vector, slot_index);
1650   __ SetAccumulator(result);
1651   __ Dispatch();
1652 }
1653 
1654 // ShiftLeftSmi <imm> <reg>
1655 //
1656 // Left shifts register <reg> by the count specified in <imm>.
1657 // Register <reg> is converted to an int32 before the operation. The 5
1658 // least-significant bits of <imm> are used as the count, i.e. <reg> << (<imm> & 0x1F).
1659 void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
1660   Node* reg_index = __ BytecodeOperandReg(1);
1661   Node* left = __ LoadRegister(reg_index);
1662   Node* right = __ BytecodeOperandImmSmi(0);
1663   Node* context = __ GetContext();
1664   Node* slot_index = __ BytecodeOperandIdx(2);
1665   Node* feedback_vector = __ LoadFeedbackVector();
1666   Variable var_lhs_type_feedback(assembler,
1667                                  MachineRepresentation::kTaggedSigned);
1668   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
1669       context, left, &var_lhs_type_feedback);
1670   Node* rhs_value = __ SmiToWord32(right);
1671   Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
1672   Node* value = __ Word32Shl(lhs_value, shift_count);
1673   Node* result = __ ChangeInt32ToTagged(value);
1674   Node* result_type = __ SelectSmiConstant(
1675       __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
1676       BinaryOperationFeedback::kNumber);
1677   __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
1678                     feedback_vector, slot_index);
1679   __ SetAccumulator(result);
1680   __ Dispatch();
1681 }
1682 
1683 // ShiftRightSmi <imm> <reg>
1684 //
1685 // Right shifts register <reg> by the count specified in <imm>.
1686 // Register <reg> is converted to an int32 before the operation. The 5
1687 // least-significant bits of <imm> are used as the count, i.e. <reg> >> (<imm> & 0x1F).
1688 void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
1689   Node* reg_index = __ BytecodeOperandReg(1);
1690   Node* left = __ LoadRegister(reg_index);
1691   Node* right = __ BytecodeOperandImmSmi(0);
1692   Node* context = __ GetContext();
1693   Node* slot_index = __ BytecodeOperandIdx(2);
1694   Node* feedback_vector = __ LoadFeedbackVector();
1695   Variable var_lhs_type_feedback(assembler,
1696                                  MachineRepresentation::kTaggedSigned);
1697   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
1698       context, left, &var_lhs_type_feedback);
1699   Node* rhs_value = __ SmiToWord32(right);
1700   Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
1701   Node* value = __ Word32Sar(lhs_value, shift_count);
1702   Node* result = __ ChangeInt32ToTagged(value);
1703   Node* result_type = __ SelectSmiConstant(
1704       __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
1705       BinaryOperationFeedback::kNumber);
1706   __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
1707                     feedback_vector, slot_index);
1708   __ SetAccumulator(result);
1709   __ Dispatch();
1710 }
1711 
1712 Node* Interpreter::BuildUnaryOp(Callable callable,
1713                                 InterpreterAssembler* assembler) {
1714   Node* target = __ HeapConstant(callable.code());
1715   Node* accumulator = __ GetAccumulator();
1716   Node* context = __ GetContext();
1717   return __ CallStub(callable.descriptor(), target, context, accumulator);
1718 }
1719 
1720 template <class Generator>
1721 void Interpreter::DoUnaryOpWithFeedback(InterpreterAssembler* assembler) {
1722   Node* value = __ GetAccumulator();
1723   Node* context = __ GetContext();
1724   Node* slot_index = __ BytecodeOperandIdx(0);
1725   Node* feedback_vector = __ LoadFeedbackVector();
1726   Node* result = Generator::Generate(assembler, value, context, feedback_vector,
1727                                      slot_index);
1728   __ SetAccumulator(result);
1729   __ Dispatch();
1730 }
1731 
1732 // ToName
1733 //
1734 // Convert the object referenced by the accumulator to a name.
1735 void Interpreter::DoToName(InterpreterAssembler* assembler) {
1736   Node* object = __ GetAccumulator();
1737   Node* context = __ GetContext();
1738   Node* result = __ ToName(context, object);
1739   __ StoreRegister(result, __ BytecodeOperandReg(0));
1740   __ Dispatch();
1741 }
1742 
1743 // ToNumber
1744 //
1745 // Convert the object referenced by the accumulator to a number.
1746 void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
1747   Node* object = __ GetAccumulator();
1748   Node* context = __ GetContext();
1749   Node* result = __ ToNumber(context, object);
1750   __ StoreRegister(result, __ BytecodeOperandReg(0));
1751   __ Dispatch();
1752 }
1753 
1754 // ToObject
1755 //
1756 // Convert the object referenced by the accumulator to a JSReceiver.
1757 void Interpreter::DoToObject(InterpreterAssembler* assembler) {
1758   Node* result = BuildUnaryOp(CodeFactory::ToObject(isolate_), assembler);
1759   __ StoreRegister(result, __ BytecodeOperandReg(0));
1760   __ Dispatch();
1761 }
1762 
1763 // Inc
1764 //
1765 // Increments value in the accumulator by one.
1766 void Interpreter::DoInc(InterpreterAssembler* assembler) {
1767   typedef CodeStubAssembler::Label Label;
1768   typedef compiler::Node Node;
1769   typedef CodeStubAssembler::Variable Variable;
1770 
1771   Node* value = __ GetAccumulator();
1772   Node* context = __ GetContext();
1773   Node* slot_index = __ BytecodeOperandIdx(0);
1774   Node* feedback_vector = __ LoadFeedbackVector();
1775 
1776   // Shared entry for floating point increment.
1777   Label do_finc(assembler), end(assembler);
1778   Variable var_finc_value(assembler, MachineRepresentation::kFloat64);
1779 
1780   // We might need to try again due to ToNumber conversion.
1781   Variable value_var(assembler, MachineRepresentation::kTagged);
1782   Variable result_var(assembler, MachineRepresentation::kTagged);
1783   Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
1784   Variable* loop_vars[] = {&value_var, &var_type_feedback};
1785   Label start(assembler, 2, loop_vars);
1786   value_var.Bind(value);
1787   var_type_feedback.Bind(
1788       assembler->SmiConstant(BinaryOperationFeedback::kNone));
1789   assembler->Goto(&start);
1790   assembler->Bind(&start);
1791   {
1792     value = value_var.value();
1793 
1794     Label if_issmi(assembler), if_isnotsmi(assembler);
1795     assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
1796 
1797     assembler->Bind(&if_issmi);
1798     {
1799       // Try fast Smi addition first.
1800       Node* one = assembler->SmiConstant(Smi::FromInt(1));
1801       Node* pair = assembler->IntPtrAddWithOverflow(
1802           assembler->BitcastTaggedToWord(value),
1803           assembler->BitcastTaggedToWord(one));
1804       Node* overflow = assembler->Projection(1, pair);
1805 
1806       // Check if the Smi addition overflowed.
1807       Label if_overflow(assembler), if_notoverflow(assembler);
1808       assembler->Branch(overflow, &if_overflow, &if_notoverflow);
1809 
1810       assembler->Bind(&if_notoverflow);
1811       var_type_feedback.Bind(assembler->SmiOr(
1812           var_type_feedback.value(),
1813           assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall)));
1814       result_var.Bind(
1815           assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
1816       assembler->Goto(&end);
1817 
1818       assembler->Bind(&if_overflow);
1819       {
1820         var_finc_value.Bind(assembler->SmiToFloat64(value));
1821         assembler->Goto(&do_finc);
1822       }
1823     }
1824 
1825     assembler->Bind(&if_isnotsmi);
1826     {
1827       // Check if the value is a HeapNumber.
1828       Label if_valueisnumber(assembler),
1829           if_valuenotnumber(assembler, Label::kDeferred);
1830       Node* value_map = assembler->LoadMap(value);
1831       assembler->Branch(assembler->IsHeapNumberMap(value_map),
1832                         &if_valueisnumber, &if_valuenotnumber);
1833 
1834       assembler->Bind(&if_valueisnumber);
1835       {
1836         // Load the HeapNumber value.
1837         var_finc_value.Bind(assembler->LoadHeapNumberValue(value));
1838         assembler->Goto(&do_finc);
1839       }
1840 
1841       assembler->Bind(&if_valuenotnumber);
1842       {
1843         // We do not require an Or with earlier feedback here because once we
1844         // convert the value to a number, we cannot reach this path. We can
1845         // only reach this path on the first pass when the feedback is kNone.
1846         CSA_ASSERT(assembler,
1847                    assembler->SmiEqual(
1848                        var_type_feedback.value(),
1849                        assembler->SmiConstant(BinaryOperationFeedback::kNone)));
1850 
1851         Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
1852         Node* instance_type = assembler->LoadMapInstanceType(value_map);
1853         Node* is_oddball = assembler->Word32Equal(
1854             instance_type, assembler->Int32Constant(ODDBALL_TYPE));
1855         assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
1856 
1857         assembler->Bind(&if_valueisoddball);
1858         {
1859           // Convert Oddball to Number and check again.
1860           value_var.Bind(
1861               assembler->LoadObjectField(value, Oddball::kToNumberOffset));
1862           var_type_feedback.Bind(assembler->SmiConstant(
1863               BinaryOperationFeedback::kNumberOrOddball));
1864           assembler->Goto(&start);
1865         }
1866 
1867         assembler->Bind(&if_valuenotoddball);
1868         {
1869           // Convert to a Number first and try again.
1870           Callable callable =
1871               CodeFactory::NonNumberToNumber(assembler->isolate());
1872           var_type_feedback.Bind(
1873               assembler->SmiConstant(BinaryOperationFeedback::kAny));
1874           value_var.Bind(assembler->CallStub(callable, context, value));
1875           assembler->Goto(&start);
1876         }
1877       }
1878     }
1879   }
1880 
1881   assembler->Bind(&do_finc);
1882   {
1883     Node* finc_value = var_finc_value.value();
1884     Node* one = assembler->Float64Constant(1.0);
1885     Node* finc_result = assembler->Float64Add(finc_value, one);
1886     var_type_feedback.Bind(assembler->SmiOr(
1887         var_type_feedback.value(),
1888         assembler->SmiConstant(BinaryOperationFeedback::kNumber)));
1889     result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result));
1890     assembler->Goto(&end);
1891   }
1892 
1893   assembler->Bind(&end);
1894   assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
1895                             slot_index);
1896 
1897   __ SetAccumulator(result_var.value());
1898   __ Dispatch();
1899 }
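
// Worked examples for the retry loop above (editorial): for ++x with
// x === true, the oddball path substitutes true's cached ToNumber value 1,
// records kNumberOrOddball feedback, and the retried increment yields 2.
// For ++x with x === "3", NonNumberToNumber produces 3, feedback widens to
// kAny, and the result is 4. In both cases the second pass can only see a
// Smi or HeapNumber, which is why the feedback need not be Or'ed with the
// kNone value from the first pass.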
1900 
1901 // Dec
1902 //
1903 // Decrements value in the accumulator by one.
1904 void Interpreter::DoDec(InterpreterAssembler* assembler) {
1905   typedef CodeStubAssembler::Label Label;
1906   typedef compiler::Node Node;
1907   typedef CodeStubAssembler::Variable Variable;
1908 
1909   Node* value = __ GetAccumulator();
1910   Node* context = __ GetContext();
1911   Node* slot_index = __ BytecodeOperandIdx(0);
1912   Node* feedback_vector = __ LoadFeedbackVector();
1913 
1914   // Shared entry for floating point decrement.
1915   Label do_fdec(assembler), end(assembler);
1916   Variable var_fdec_value(assembler, MachineRepresentation::kFloat64);
1917 
1918   // We might need to try again due to ToNumber conversion.
1919   Variable value_var(assembler, MachineRepresentation::kTagged);
1920   Variable result_var(assembler, MachineRepresentation::kTagged);
1921   Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
1922   Variable* loop_vars[] = {&value_var, &var_type_feedback};
1923   Label start(assembler, 2, loop_vars);
1924   var_type_feedback.Bind(
1925       assembler->SmiConstant(BinaryOperationFeedback::kNone));
1926   value_var.Bind(value);
1927   assembler->Goto(&start);
1928   assembler->Bind(&start);
1929   {
1930     value = value_var.value();
1931 
1932     Label if_issmi(assembler), if_isnotsmi(assembler);
1933     assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
1934 
1935     assembler->Bind(&if_issmi);
1936     {
1937       // Try fast Smi subtraction first.
1938       Node* one = assembler->SmiConstant(Smi::FromInt(1));
1939       Node* pair = assembler->IntPtrSubWithOverflow(
1940           assembler->BitcastTaggedToWord(value),
1941           assembler->BitcastTaggedToWord(one));
1942       Node* overflow = assembler->Projection(1, pair);
1943 
1944       // Check if the Smi subtraction overflowed.
1945       Label if_overflow(assembler), if_notoverflow(assembler);
1946       assembler->Branch(overflow, &if_overflow, &if_notoverflow);
1947 
1948       assembler->Bind(&if_notoverflow);
1949       var_type_feedback.Bind(assembler->SmiOr(
1950           var_type_feedback.value(),
1951           assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall)));
1952       result_var.Bind(
1953           assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
1954       assembler->Goto(&end);
1955 
1956       assembler->Bind(&if_overflow);
1957       {
1958         var_fdec_value.Bind(assembler->SmiToFloat64(value));
1959         assembler->Goto(&do_fdec);
1960       }
1961     }
1962 
1963     assembler->Bind(&if_isnotsmi);
1964     {
1965       // Check if the value is a HeapNumber.
1966       Label if_valueisnumber(assembler),
1967           if_valuenotnumber(assembler, Label::kDeferred);
1968       Node* value_map = assembler->LoadMap(value);
1969       assembler->Branch(assembler->IsHeapNumberMap(value_map),
1970                         &if_valueisnumber, &if_valuenotnumber);
1971 
1972       assembler->Bind(&if_valueisnumber);
1973       {
1974         // Load the HeapNumber value.
1975         var_fdec_value.Bind(assembler->LoadHeapNumberValue(value));
1976         assembler->Goto(&do_fdec);
1977       }
1978 
1979       assembler->Bind(&if_valuenotnumber);
1980       {
1981         // We do not require an Or with earlier feedback here because once we
1982         // convert the value to a number, we cannot reach this path. We can
1983         // only reach this path on the first pass when the feedback is kNone.
1984         CSA_ASSERT(assembler,
1985                    assembler->SmiEqual(
1986                        var_type_feedback.value(),
1987                        assembler->SmiConstant(BinaryOperationFeedback::kNone)));
1988 
1989         Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
1990         Node* instance_type = assembler->LoadMapInstanceType(value_map);
1991         Node* is_oddball = assembler->Word32Equal(
1992             instance_type, assembler->Int32Constant(ODDBALL_TYPE));
1993         assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
1994 
1995         assembler->Bind(&if_valueisoddball);
1996         {
1997           // Convert Oddball to Number and check again.
1998           value_var.Bind(
1999               assembler->LoadObjectField(value, Oddball::kToNumberOffset));
2000           var_type_feedback.Bind(assembler->SmiConstant(
2001               BinaryOperationFeedback::kNumberOrOddball));
2002           assembler->Goto(&start);
2003         }
2004 
2005         assembler->Bind(&if_valuenotoddball);
2006         {
2007           // Convert to a Number first and try again.
2008           Callable callable =
2009               CodeFactory::NonNumberToNumber(assembler->isolate());
2010           var_type_feedback.Bind(
2011               assembler->SmiConstant(BinaryOperationFeedback::kAny));
2012           value_var.Bind(assembler->CallStub(callable, context, value));
2013           assembler->Goto(&start);
2014         }
2015       }
2016     }
2017   }
2018 
2019   assembler->Bind(&do_fdec);
2020   {
2021     Node* fdec_value = var_fdec_value.value();
2022     Node* one = assembler->Float64Constant(1.0);
2023     Node* fdec_result = assembler->Float64Sub(fdec_value, one);
2024     var_type_feedback.Bind(assembler->SmiOr(
2025         var_type_feedback.value(),
2026         assembler->SmiConstant(BinaryOperationFeedback::kNumber)));
2027     result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result));
2028     assembler->Goto(&end);
2029   }
2030 
2031   assembler->Bind(&end);
2032   assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
2033                             slot_index);
2034 
2035   __ SetAccumulator(result_var.value());
2036   __ Dispatch();
2037 }
2038 
2039 // ToBooleanLogicalNot
2040 //
2041 // Perform logical-not on the accumulator, first casting the
2042 // accumulator to a boolean value if required.
2044 void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
2045   Node* value = __ GetAccumulator();
2046   Variable result(assembler, MachineRepresentation::kTagged);
2047   Label if_true(assembler), if_false(assembler), end(assembler);
2048   Node* true_value = __ BooleanConstant(true);
2049   Node* false_value = __ BooleanConstant(false);
2050   __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
2051   __ Bind(&if_true);
2052   {
2053     result.Bind(false_value);
2054     __ Goto(&end);
2055   }
2056   __ Bind(&if_false);
2057   {
2058     result.Bind(true_value);
2059     __ Goto(&end);
2060   }
2061   __ Bind(&end);
2062   __ SetAccumulator(result.value());
2063   __ Dispatch();
2064 }
2065 
2066 // LogicalNot
2067 //
2068 // Perform logical-not on the accumulator, which must already be a boolean
2069 // value.
2070 void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
2071   Node* value = __ GetAccumulator();
2072   Variable result(assembler, MachineRepresentation::kTagged);
2073   Label if_true(assembler), if_false(assembler), end(assembler);
2074   Node* true_value = __ BooleanConstant(true);
2075   Node* false_value = __ BooleanConstant(false);
2076   __ Branch(__ WordEqual(value, true_value), &if_true, &if_false);
2077   __ Bind(&if_true);
2078   {
2079     result.Bind(false_value);
2080     __ Goto(&end);
2081   }
2082   __ Bind(&if_false);
2083   {
2084     if (FLAG_debug_code) {
2085       __ AbortIfWordNotEqual(value, false_value,
2086                              BailoutReason::kExpectedBooleanValue);
2087     }
2088     result.Bind(true_value);
2089     __ Goto(&end);
2090   }
2091   __ Bind(&end);
2092   __ SetAccumulator(result.value());
2093   __ Dispatch();
2094 }
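
// Editorial note (hedged): the bytecode generator presumably emits the plain
// LogicalNot above only when the operand is statically known to be a boolean,
// e.g. !(a === b), where TestEqualStrict already leaves true or false in the
// accumulator; a bare !x on an arbitrary value gets ToBooleanLogicalNot
// instead. The debug-only AbortIfWordNotEqual check enforces that contract.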
2095 
2096 // TypeOf
2097 //
2098 // Load the accumulator with the string representing the type of the
2099 // object in the accumulator.
2100 void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
2101   Node* value = __ GetAccumulator();
2102   Node* context = __ GetContext();
2103   Node* result = assembler->Typeof(value, context);
2104   __ SetAccumulator(result);
2105   __ Dispatch();
2106 }
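
// Worked examples of the Typeof results (editorial):
//   typeof 1         === "number"      typeof undefined === "undefined"
//   typeof ""        === "string"      typeof null      === "object"
//   typeof {}        === "object"      typeof Math.sin  === "function"
// Undetectable objects also report "undefined"; see TestUndetectable below.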
2107 
2108 void Interpreter::DoDelete(Runtime::FunctionId function_id,
2109                            InterpreterAssembler* assembler) {
2110   Node* reg_index = __ BytecodeOperandReg(0);
2111   Node* object = __ LoadRegister(reg_index);
2112   Node* key = __ GetAccumulator();
2113   Node* context = __ GetContext();
2114   Node* result = __ CallRuntime(function_id, context, object, key);
2115   __ SetAccumulator(result);
2116   __ Dispatch();
2117 }
2118 
2119 // DeletePropertyStrict
2120 //
2121 // Delete the property specified in the accumulator from the object
2122 // referenced by the register operand following strict mode semantics.
2123 void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) {
2124   DoDelete(Runtime::kDeleteProperty_Strict, assembler);
2125 }
2126 
2127 // DeletePropertySloppy
2128 //
2129 // Delete the property specified in the accumulator from the object
2130 // referenced by the register operand following sloppy mode semantics.
2131 void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) {
2132   DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
2133 }
2134 
2135 // GetSuperConstructor
2136 //
2137 // Get the super constructor from the object referenced by the accumulator.
2138 // The result is stored in register |reg|.
2139 void Interpreter::DoGetSuperConstructor(InterpreterAssembler* assembler) {
2140   Node* active_function = __ GetAccumulator();
2141   Node* context = __ GetContext();
2142   Node* result = __ GetSuperConstructor(active_function, context);
2143   Node* reg = __ BytecodeOperandReg(0);
2144   __ StoreRegister(result, reg);
2145   __ Dispatch();
2146 }
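
// Worked example (editorial): for `class B extends A { ... }`, the super
// constructor of the active function B is its prototype, i.e.
// Object.getPrototypeOf(B) === A, which is what GetSuperConstructor loads
// (along with checks that the prototype is in fact a constructor).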
2147 
2148 void Interpreter::DoJSCall(InterpreterAssembler* assembler,
2149                            TailCallMode tail_call_mode) {
2150   Node* function_reg = __ BytecodeOperandReg(0);
2151   Node* function = __ LoadRegister(function_reg);
2152   Node* receiver_reg = __ BytecodeOperandReg(1);
2153   Node* receiver_arg = __ RegisterLocation(receiver_reg);
2154   Node* receiver_args_count = __ BytecodeOperandCount(2);
2155   Node* receiver_count = __ Int32Constant(1);
2156   Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
2157   Node* slot_id = __ BytecodeOperandIdx(3);
2158   Node* feedback_vector = __ LoadFeedbackVector();
2159   Node* context = __ GetContext();
2160   Node* result =
2161       __ CallJSWithFeedback(function, context, receiver_arg, args_count,
2162                             slot_id, feedback_vector, tail_call_mode);
2163   __ SetAccumulator(result);
2164   __ Dispatch();
2165 }
2166 
2167 // Call <callable> <receiver> <arg_count> <feedback_slot_id>
2168 //
2169 // Call a JSFunction or Callable in |callable| with the |receiver| and
2170 // |arg_count| arguments in subsequent registers. Collect type feedback
2171 // into |feedback_slot_id|.
2172 void Interpreter::DoCall(InterpreterAssembler* assembler) {
2173   DoJSCall(assembler, TailCallMode::kDisallow);
2174 }
2175 
2176 // CallProperty <callable> <receiver> <arg_count> <feedback_slot_id>
2177 //
2178 // Call a JSFunction or Callable in |callable| with the |receiver| and
2179 // |arg_count| arguments in subsequent registers. Collect type feedback into
2180 // |feedback_slot_id|. The callable is known to be a property of the receiver.
2181 void Interpreter::DoCallProperty(InterpreterAssembler* assembler) {
2182   // TODO(leszeks): Look into making the interpreter use the fact that the
2183   // receiver is non-null.
2184   DoJSCall(assembler, TailCallMode::kDisallow);
2185 }
2186 
2187 // TailCall <callable> <receiver> <arg_count> <feedback_slot_id>
2188 //
2189 // Tail call a JSFunction or Callable in |callable| with the |receiver| and
2190 // |arg_count| arguments in subsequent registers. Collect type feedback
2191 // into |feedback_slot_id|.
2192 void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
2193   DoJSCall(assembler, TailCallMode::kAllow);
2194 }
2195 
2196 // CallRuntime <function_id> <first_arg> <arg_count>
2197 //
2198 // Call the runtime function |function_id| with the first argument in
2199 // register |first_arg| and |arg_count| arguments in subsequent
2200 // registers.
2201 void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
2202   Node* function_id = __ BytecodeOperandRuntimeId(0);
2203   Node* first_arg_reg = __ BytecodeOperandReg(1);
2204   Node* first_arg = __ RegisterLocation(first_arg_reg);
2205   Node* args_count = __ BytecodeOperandCount(2);
2206   Node* context = __ GetContext();
2207   Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count);
2208   __ SetAccumulator(result);
2209   __ Dispatch();
2210 }
2211 
2212 // InvokeIntrinsic <function_id> <first_arg> <arg_count>
2213 //
2214 // Implements the semantic equivalent of calling the runtime function
2215 // |function_id| with the first argument in |first_arg| and |arg_count|
2216 // arguments in subsequent registers.
2217 void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
2218   Node* function_id = __ BytecodeOperandIntrinsicId(0);
2219   Node* first_arg_reg = __ BytecodeOperandReg(1);
2220   Node* arg_count = __ BytecodeOperandCount(2);
2221   Node* context = __ GetContext();
2222   IntrinsicsHelper helper(assembler);
2223   Node* result =
2224       helper.InvokeIntrinsic(function_id, context, first_arg_reg, arg_count);
2225   __ SetAccumulator(result);
2226   __ Dispatch();
2227 }
2228 
2229 // CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
2230 //
2231 // Call the runtime function |function_id| which returns a pair, with the
2232 // first argument in register |first_arg| and |arg_count| arguments in
2233 // subsequent registers. Returns the result in <first_return> and
2234 // <first_return + 1>
2235 void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
2236   // Call the runtime function.
2237   Node* function_id = __ BytecodeOperandRuntimeId(0);
2238   Node* first_arg_reg = __ BytecodeOperandReg(1);
2239   Node* first_arg = __ RegisterLocation(first_arg_reg);
2240   Node* args_count = __ BytecodeOperandCount(2);
2241   Node* context = __ GetContext();
2242   Node* result_pair =
2243       __ CallRuntimeN(function_id, context, first_arg, args_count, 2);
2244 
2245   // Store the results in <first_return> and <first_return + 1>
2246   Node* first_return_reg = __ BytecodeOperandReg(3);
2247   Node* second_return_reg = __ NextRegister(first_return_reg);
2248   Node* result0 = __ Projection(0, result_pair);
2249   Node* result1 = __ Projection(1, result_pair);
2250   __ StoreRegister(result0, first_return_reg);
2251   __ StoreRegister(result1, second_return_reg);
2252   __ Dispatch();
2253 }
2254 
2255 // CallJSRuntime <context_index> <receiver> <arg_count>
2256 //
2257 // Call the JS runtime function at index |context_index| in the native
2258 // context, with the receiver in register |receiver| and |arg_count|
2259 // arguments in subsequent registers.
2259 void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
2260   Node* context_index = __ BytecodeOperandIdx(0);
2261   Node* receiver_reg = __ BytecodeOperandReg(1);
2262   Node* first_arg = __ RegisterLocation(receiver_reg);
2263   Node* receiver_args_count = __ BytecodeOperandCount(2);
2264   Node* receiver_count = __ Int32Constant(1);
2265   Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
2266 
2267   // Get the function to call from the native context.
2268   Node* context = __ GetContext();
2269   Node* native_context = __ LoadNativeContext(context);
2270   Node* function = __ LoadContextElement(native_context, context_index);
2271 
2272   // Call the function.
2273   Node* result = __ CallJS(function, context, first_arg, args_count,
2274                            TailCallMode::kDisallow);
2275   __ SetAccumulator(result);
2276   __ Dispatch();
2277 }
2278 
2279 // CallWithSpread <callable> <first_arg> <arg_count>
2280 //
2281 // Call a JSFunction or Callable in |callable| with the receiver in
2282 // |first_arg| and |arg_count - 1| arguments in subsequent registers. The
2283 // final argument is always a spread.
2284 //
2285 void Interpreter::DoCallWithSpread(InterpreterAssembler* assembler) {
2286   Node* callable_reg = __ BytecodeOperandReg(0);
2287   Node* callable = __ LoadRegister(callable_reg);
2288   Node* receiver_reg = __ BytecodeOperandReg(1);
2289   Node* receiver_arg = __ RegisterLocation(receiver_reg);
2290   Node* receiver_args_count = __ BytecodeOperandCount(2);
2291   Node* receiver_count = __ Int32Constant(1);
2292   Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
2293   Node* context = __ GetContext();
2294 
2295   // Call the CallWithSpread stub, which expands the spread and performs the call.
2296   Node* result =
2297       __ CallJSWithSpread(callable, context, receiver_arg, args_count);
2298   __ SetAccumulator(result);
2299   __ Dispatch();
2300 }
2301 
2302 // ConstructWithSpread <constructor> <first_arg> <arg_count>
2303 //
2304 // Call the constructor in |constructor| with the first argument in register
2305 // |first_arg| and |arg_count| arguments in subsequent registers. The final
2306 // argument is always a spread. The new.target is in the accumulator.
2307 //
2308 void Interpreter::DoConstructWithSpread(InterpreterAssembler* assembler) {
2309   Node* new_target = __ GetAccumulator();
2310   Node* constructor_reg = __ BytecodeOperandReg(0);
2311   Node* constructor = __ LoadRegister(constructor_reg);
2312   Node* first_arg_reg = __ BytecodeOperandReg(1);
2313   Node* first_arg = __ RegisterLocation(first_arg_reg);
2314   Node* args_count = __ BytecodeOperandCount(2);
2315   Node* context = __ GetContext();
2316   Node* result = __ ConstructWithSpread(constructor, context, new_target,
2317                                         first_arg, args_count);
2318   __ SetAccumulator(result);
2319   __ Dispatch();
2320 }
2321 
2322 // Construct <constructor> <first_arg> <arg_count>
2323 //
2324 // Call operator construct with |constructor| and the first argument in
2325 // register |first_arg| and |arg_count| arguments in subsequent
2326 // registers. The new.target is in the accumulator.
2327 //
2328 void Interpreter::DoConstruct(InterpreterAssembler* assembler) {
2329   Node* new_target = __ GetAccumulator();
2330   Node* constructor_reg = __ BytecodeOperandReg(0);
2331   Node* constructor = __ LoadRegister(constructor_reg);
2332   Node* first_arg_reg = __ BytecodeOperandReg(1);
2333   Node* first_arg = __ RegisterLocation(first_arg_reg);
2334   Node* args_count = __ BytecodeOperandCount(2);
2335   Node* slot_id = __ BytecodeOperandIdx(3);
2336   Node* feedback_vector = __ LoadFeedbackVector();
2337   Node* context = __ GetContext();
2338   Node* result = __ Construct(constructor, context, new_target, first_arg,
2339                               args_count, slot_id, feedback_vector);
2340   __ SetAccumulator(result);
2341   __ Dispatch();
2342 }
2343 
2344 // TestEqual <src>
2345 //
2346 // Test if the value in the <src> register equals the accumulator.
2347 void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
2348   DoCompareOpWithFeedback(Token::Value::EQ, assembler);
2349 }
2350 
2351 // TestNotEqual <src>
2352 //
2353 // Test if the value in the <src> register is not equal to the accumulator.
2354 void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
2355   DoCompareOpWithFeedback(Token::Value::NE, assembler);
2356 }
2357 
2358 // TestEqualStrict <src>
2359 //
2360 // Test if the value in the <src> register is strictly equal to the accumulator.
2361 void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
2362   DoCompareOpWithFeedback(Token::Value::EQ_STRICT, assembler);
2363 }
2364 
2365 // TestLessThan <src>
2366 //
2367 // Test if the value in the <src> register is less than the accumulator.
2368 void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
2369   DoCompareOpWithFeedback(Token::Value::LT, assembler);
2370 }
2371 
2372 // TestGreaterThan <src>
2373 //
2374 // Test if the value in the <src> register is greater than the accumulator.
2375 void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
2376   DoCompareOpWithFeedback(Token::Value::GT, assembler);
2377 }
2378 
2379 // TestLessThanOrEqual <src>
2380 //
2381 // Test if the value in the <src> register is less than or equal to the
2382 // accumulator.
2383 void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
2384   DoCompareOpWithFeedback(Token::Value::LTE, assembler);
2385 }
2386 
2387 // TestGreaterThanOrEqual <src>
2388 //
2389 // Test if the value in the <src> register is greater than or equal to the
2390 // accumulator.
2391 void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
2392   DoCompareOpWithFeedback(Token::Value::GTE, assembler);
2393 }
2394 
2395 // TestIn <src>
2396 //
2397 // Test if the object referenced by the register operand is a property of the
2398 // object referenced by the accumulator.
2399 void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
2400   DoCompareOp(Token::IN, assembler);
2401 }
2402 
2403 // TestInstanceOf <src>
2404 //
2405 // Test if the object referenced by the <src> register is an instance of the
2406 // type referenced by the accumulator.
2407 void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
2408   DoCompareOp(Token::INSTANCEOF, assembler);
2409 }
2410 
2411 // TestUndetectable <src>
2412 //
2413 // Test if the value in the <src> register is loosely equal to null/undefined.
2414 // This is done by checking the undetectable bit on the map of the object.
2415 void Interpreter::DoTestUndetectable(InterpreterAssembler* assembler) {
2416   Node* reg_index = __ BytecodeOperandReg(0);
2417   Node* object = __ LoadRegister(reg_index);
2418 
2419   Label not_equal(assembler), end(assembler);
2420   // If the object is a Smi then return false.
2421   __ GotoIf(__ TaggedIsSmi(object), &not_equal);
2422 
2423   // If it is a HeapObject, load the map and check the undetectable bit.
2424   Node* map = __ LoadMap(object);
2425   Node* map_bitfield = __ LoadMapBitField(map);
2426   Node* map_undetectable =
2427       __ Word32And(map_bitfield, __ Int32Constant(1 << Map::kIsUndetectable));
2428   __ GotoIf(__ Word32Equal(map_undetectable, __ Int32Constant(0)), &not_equal);
2429 
2430   __ SetAccumulator(__ BooleanConstant(true));
2431   __ Goto(&end);
2432 
2433   __ Bind(&not_equal);
2434   {
2435     __ SetAccumulator(__ BooleanConstant(false));
2436     __ Goto(&end);
2437   }
2438 
2439   __ Bind(&end);
2440   __ Dispatch();
2441 }
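
// Worked example (editorial): the canonical undetectable object is
// document.all, whose map has the undetectable bit set. Hence
// document.all == null is true and TestUndetectable yields true for it,
// while document.all === null remains false, since strict equality never
// consults the undetectable bit.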
2442 
2443 // TestNull <src>
2444 //
2445 // Test if the value in the <src> register is strictly equal to null.
2446 void Interpreter::DoTestNull(InterpreterAssembler* assembler) {
2447   Node* reg_index = __ BytecodeOperandReg(0);
2448   Node* object = __ LoadRegister(reg_index);
2449   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
2450 
2451   Label equal(assembler), end(assembler);
2452   __ GotoIf(__ WordEqual(object, null_value), &equal);
2453   __ SetAccumulator(__ BooleanConstant(false));
2454   __ Goto(&end);
2455 
2456   __ Bind(&equal);
2457   {
2458     __ SetAccumulator(__ BooleanConstant(true));
2459     __ Goto(&end);
2460   }
2461 
2462   __ Bind(&end);
2463   __ Dispatch();
2464 }
2465 
2466 // TestUndefined <src>
2467 //
2468 // Test if the value in the <src> register is strictly equal to undefined.
2469 void Interpreter::DoTestUndefined(InterpreterAssembler* assembler) {
2470   Node* reg_index = __ BytecodeOperandReg(0);
2471   Node* object = __ LoadRegister(reg_index);
2472   Node* undefined_value =
2473       __ HeapConstant(isolate_->factory()->undefined_value());
2474 
2475   Label equal(assembler), end(assembler);
2476   __ GotoIf(__ WordEqual(object, undefined_value), &equal);
2477   __ SetAccumulator(__ BooleanConstant(false));
2478   __ Goto(&end);
2479 
2480   __ Bind(&equal);
2481   {
2482     __ SetAccumulator(__ BooleanConstant(true));
2483     __ Goto(&end);
2484   }
2485 
2486   __ Bind(&end);
2487   __ Dispatch();
2488 }
2489 
2490 // Jump <imm>
2491 //
2492 // Jump by number of bytes represented by the immediate operand |imm|.
2493 void Interpreter::DoJump(InterpreterAssembler* assembler) {
2494   Node* relative_jump = __ BytecodeOperandUImmWord(0);
2495   __ Jump(relative_jump);
2496 }
2497 
2498 // JumpConstant <idx>
2499 //
2500 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
2501 void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
2502   Node* index = __ BytecodeOperandIdx(0);
2503   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2504   __ Jump(relative_jump);
2505 }
2506 
2507 // JumpIfTrue <imm>
2508 //
2509 // Jump by number of bytes represented by an immediate operand if the
2510 // accumulator contains true. This only works for boolean inputs, and
2511 // will misbehave if passed arbitrary input values.
2512 void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
2513   Node* accumulator = __ GetAccumulator();
2514   Node* relative_jump = __ BytecodeOperandUImmWord(0);
2515   Node* true_value = __ BooleanConstant(true);
2516   CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
2517   CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
2518   __ JumpIfWordEqual(accumulator, true_value, relative_jump);
2519 }
2520 
2521 // JumpIfTrueConstant <idx>
2522 //
2523 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
2524 // if the accumulator contains true. This only works for boolean inputs, and
2525 // will misbehave if passed arbitrary input values.
2526 void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
2527   Node* accumulator = __ GetAccumulator();
2528   Node* index = __ BytecodeOperandIdx(0);
2529   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2530   Node* true_value = __ BooleanConstant(true);
2531   CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
2532   CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
2533   __ JumpIfWordEqual(accumulator, true_value, relative_jump);
2534 }
2535 
2536 // JumpIfFalse <imm>
2537 //
2538 // Jump by number of bytes represented by an immediate operand if the
2539 // accumulator contains false. This only works for boolean inputs, and
2540 // will misbehave if passed arbitrary input values.
2541 void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
2542   Node* accumulator = __ GetAccumulator();
2543   Node* relative_jump = __ BytecodeOperandUImmWord(0);
2544   Node* false_value = __ BooleanConstant(false);
2545   CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
2546   CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
2547   __ JumpIfWordEqual(accumulator, false_value, relative_jump);
2548 }
2549 
2550 // JumpIfFalseConstant <idx>
2551 //
2552 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
2553 // if the accumulator contains false. This only works for boolean inputs, and
2554 // will misbehave if passed arbitrary input values.
2555 void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
2556   Node* accumulator = __ GetAccumulator();
2557   Node* index = __ BytecodeOperandIdx(0);
2558   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2559   Node* false_value = __ BooleanConstant(false);
2560   CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
2561   CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
2562   __ JumpIfWordEqual(accumulator, false_value, relative_jump);
2563 }
2564 
2565 // JumpIfToBooleanTrue <imm>
2566 //
2567 // Jump by number of bytes represented by an immediate operand if the object
2568 // referenced by the accumulator is true when the object is cast to boolean.
2569 void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
2570   Node* value = __ GetAccumulator();
2571   Node* relative_jump = __ BytecodeOperandUImmWord(0);
2572   Label if_true(assembler), if_false(assembler);
2573   __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
2574   __ Bind(&if_true);
2575   __ Jump(relative_jump);
2576   __ Bind(&if_false);
2577   __ Dispatch();
2578 }
2579 
2580 // JumpIfToBooleanTrueConstant <idx>
2581 //
2582 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
2583 // if the object referenced by the accumulator is true when the object is cast
2584 // to boolean.
2585 void Interpreter::DoJumpIfToBooleanTrueConstant(
2586     InterpreterAssembler* assembler) {
2587   Node* value = __ GetAccumulator();
2588   Node* index = __ BytecodeOperandIdx(0);
2589   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2590   Label if_true(assembler), if_false(assembler);
2591   __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
2592   __ Bind(&if_true);
2593   __ Jump(relative_jump);
2594   __ Bind(&if_false);
2595   __ Dispatch();
2596 }
2597 
2598 // JumpIfToBooleanFalse <imm>
2599 //
2600 // Jump by number of bytes represented by an immediate operand if the object
2601 // referenced by the accumulator is false when the object is cast to boolean.
2602 void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
2603   Node* value = __ GetAccumulator();
2604   Node* relative_jump = __ BytecodeOperandUImmWord(0);
2605   Label if_true(assembler), if_false(assembler);
2606   __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
2607   __ Bind(&if_true);
2608   __ Dispatch();
2609   __ Bind(&if_false);
2610   __ Jump(relative_jump);
2611 }
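
// Editorial note: the ToBoolean cast used by the JumpIfToBoolean* handlers
// treats exactly false, 0, -0, NaN, "", null, undefined and undetectable
// objects as false; every other value, including "0", [] and {}, is true.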
2612 
2613 // JumpIfToBooleanFalseConstant <idx>
2614 //
2615 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
2616 // if the object referenced by the accumulator is false when the object is cast
2617 // to boolean.
DoJumpIfToBooleanFalseConstant(InterpreterAssembler * assembler)2618 void Interpreter::DoJumpIfToBooleanFalseConstant(
2619     InterpreterAssembler* assembler) {
2620   Node* value = __ GetAccumulator();
2621   Node* index = __ BytecodeOperandIdx(0);
2622   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2623   Label if_true(assembler), if_false(assembler);
2624   __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
2625   __ Bind(&if_true);
2626   __ Dispatch();
2627   __ Bind(&if_false);
2628   __ Jump(relative_jump);
2629 }

// JumpIfNull <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the null constant.
void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  Node* relative_jump = __ BytecodeOperandUImmWord(0);
  __ JumpIfWordEqual(accumulator, null_value, relative_jump);
}

// JumpIfNullConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is the null constant.
void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
  __ JumpIfWordEqual(accumulator, null_value, relative_jump);
}

// JumpIfUndefined <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the undefined constant.
void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  Node* relative_jump = __ BytecodeOperandUImmWord(0);
  __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}

// JumpIfUndefinedConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is the undefined constant.
void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* undefined_value =
      __ HeapConstant(isolate_->factory()->undefined_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
  __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}

// JumpIfJSReceiver <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is a JSReceiver.
void Interpreter::DoJumpIfJSReceiver(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* relative_jump = __ BytecodeOperandUImmWord(0);

  Label if_object(assembler), if_notobject(assembler, Label::kDeferred),
      if_notsmi(assembler);
  __ Branch(__ TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);

  __ Bind(&if_notsmi);
  __ Branch(__ IsJSReceiver(accumulator), &if_object, &if_notobject);
  __ Bind(&if_object);
  __ Jump(relative_jump);

  __ Bind(&if_notobject);
  __ Dispatch();
}

// JumpIfJSReceiverConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool if
// the object referenced by the accumulator is a JSReceiver.
void Interpreter::DoJumpIfJSReceiverConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* index = __ BytecodeOperandIdx(0);
  Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);

  Label if_object(assembler), if_notobject(assembler), if_notsmi(assembler);
  __ Branch(__ TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);

  __ Bind(&if_notsmi);
  __ Branch(__ IsJSReceiver(accumulator), &if_object, &if_notobject);

  __ Bind(&if_object);
  __ Jump(relative_jump);

  __ Bind(&if_notobject);
  __ Dispatch();
}

// JumpIfNotHole <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is not the hole.
void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  Node* relative_jump = __ BytecodeOperandUImmWord(0);
  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}

// JumpIfNotHoleConstant <idx>
//
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
// if the object referenced by the accumulator is not the hole constant.
void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
  Node* accumulator = __ GetAccumulator();
  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
  Node* index = __ BytecodeOperandIdx(0);
  Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}

// JumpLoop <imm> <loop_depth>
//
// Jump by number of bytes represented by the immediate operand |imm|. Also
// performs a loop nesting check and potentially triggers OSR in case the
// current OSR level matches (or exceeds) the specified |loop_depth|.
void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) {
  Node* relative_jump = __ BytecodeOperandUImmWord(0);
  Node* loop_depth = __ BytecodeOperandImm(1);
  Node* osr_level = __ LoadOSRNestingLevel();

  // Check if OSR points at the given {loop_depth} are armed by comparing it to
  // the current {osr_level} loaded from the header of the BytecodeArray.
  Label ok(assembler), osr_armed(assembler, Label::kDeferred);
  Node* condition = __ Int32GreaterThanOrEqual(loop_depth, osr_level);
  __ Branch(condition, &ok, &osr_armed);

  __ Bind(&ok);
  __ JumpBackward(relative_jump);

  __ Bind(&osr_armed);
  {
    Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate_);
    Node* target = __ HeapConstant(callable.code());
    Node* context = __ GetContext();
    __ CallStub(callable.descriptor(), target, context);
    __ JumpBackward(relative_jump);
  }
}
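
// Editorial illustration (not in the original source): JumpLoop is the
// backward branch at the bottom of a loop, so `while (cond) body;` might
// lower to the following shape (offsets and loop depth schematic):
//
//   loop_header:
//     StackCheck
//     <cond>                      ; leaves the condition in the accumulator
//     JumpIfToBooleanFalse [+N]   ; exit the loop
//     <body>
//     JumpLoop [-M], [0]          ; branch back; depth 0 = outermost loop
//
// The osr_armed path above is only taken once the runtime has bumped the
// BytecodeArray's OSR nesting level to at least this loop's depth.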

// CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
//
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* pattern = __ LoadConstantPoolEntry(index);
  Node* literal_index = __ BytecodeOperandIdxSmi(1);
  Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2));
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
  Node* result = constructor_assembler.EmitFastCloneRegExp(
      closure, literal_index, pattern, flags, context);
  __ SetAccumulator(result);
  __ Dispatch();
}
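
// Editorial illustration (not in the original source): a regexp literal such
// as `/ab+c/gi` compiles to a single bytecode (operand values schematic):
//
//   CreateRegExpLiteral [pattern_idx], [literal_idx], #flags
//
// where the pattern string lives in the constant pool and <literal_idx>
// identifies the per-closure slot used to cache the regexp boilerplate.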

// CreateArrayLiteral <element_idx> <literal_idx> <flags>
//
// Creates an array literal for literal index <literal_idx> with
// CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
  Node* literal_index = __ BytecodeOperandIdxSmi(1);
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  Node* bytecode_flags = __ BytecodeOperandFlag(2);

  Label fast_shallow_clone(assembler),
      call_runtime(assembler, Label::kDeferred);
  __ Branch(__ IsSetWord32<CreateArrayLiteralFlags::FastShallowCloneBit>(
                bytecode_flags),
            &fast_shallow_clone, &call_runtime);

  __ Bind(&fast_shallow_clone);
  {
    ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
    Node* result = constructor_assembler.EmitFastCloneShallowArray(
        closure, literal_index, context, &call_runtime, TRACK_ALLOCATION_SITE);
    __ SetAccumulator(result);
    __ Dispatch();
  }

  __ Bind(&call_runtime);
  {
    Node* flags_raw =
        __ DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
            bytecode_flags);
    Node* flags = __ SmiTag(flags_raw);
    Node* index = __ BytecodeOperandIdx(0);
    Node* constant_elements = __ LoadConstantPoolEntry(index);
    Node* result =
        __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
                       literal_index, constant_elements, flags);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}
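
// Editorial illustration (not in the original source): `var a = [1, 2, 3];`
// might lower to (operands schematic):
//
//   CreateArrayLiteral [element_idx], [literal_idx], #flags
//   Star r0
//
// The FastShallowCloneBit in #flags selects the stub path above; literals
// that are not eligible for a shallow clone fall back to
// Runtime::kCreateArrayLiteral.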

// CreateObjectLiteral <element_idx> <literal_idx> <flags> <out_reg>
//
// Creates an object literal for literal index <literal_idx> with
// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
// The result is stored in register <out_reg>.
void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
  Node* literal_index = __ BytecodeOperandIdxSmi(1);
  Node* bytecode_flags = __ BytecodeOperandFlag(2);
  Node* closure = __ LoadRegister(Register::function_closure());

  // Check if we can do a fast clone or have to call the runtime.
  Label if_fast_clone(assembler),
      if_not_fast_clone(assembler, Label::kDeferred);
  Node* fast_clone_properties_count = __ DecodeWordFromWord32<
      CreateObjectLiteralFlags::FastClonePropertiesCountBits>(bytecode_flags);
  __ Branch(__ WordNotEqual(fast_clone_properties_count, __ IntPtrConstant(0)),
            &if_fast_clone, &if_not_fast_clone);

  __ Bind(&if_fast_clone);
  {
    // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
    ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
    Node* result = constructor_assembler.EmitFastCloneShallowObject(
        &if_not_fast_clone, closure, literal_index,
        fast_clone_properties_count);
    __ StoreRegister(result, __ BytecodeOperandReg(3));
    __ Dispatch();
  }

  __ Bind(&if_not_fast_clone);
  {
    // If we can't do a fast clone, call into the runtime.
    Node* index = __ BytecodeOperandIdx(0);
    Node* constant_elements = __ LoadConstantPoolEntry(index);
    Node* context = __ GetContext();

    Node* flags_raw =
        __ DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
            bytecode_flags);
    Node* flags = __ SmiTag(flags_raw);

    Node* result =
        __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
                       literal_index, constant_elements, flags);
    __ StoreRegister(result, __ BytecodeOperandReg(3));
    // TODO(klaasb) build a single dispatch once the call is inlined
    __ Dispatch();
  }
}
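
// Editorial illustration (not in the original source): `var o = {a: 1};`
// might lower to (operands schematic):
//
//   CreateObjectLiteral [element_idx], [literal_idx], #flags, r0
//
// Note that unlike CreateArrayLiteral, the result is written to an output
// register on both paths before dispatch, which is why the handler stores
// through BytecodeOperandReg(3) rather than the accumulator.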

// CreateClosure <index> <slot> <tenured>
//
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with the PretenureFlag <tenured>.
void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* shared = __ LoadConstantPoolEntry(index);
  Node* flags = __ BytecodeOperandFlag(2);
  Node* context = __ GetContext();

  Label call_runtime(assembler, Label::kDeferred);
  __ GotoIfNot(__ IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags),
               &call_runtime);
  ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
  Node* vector_index = __ BytecodeOperandIdx(1);
  vector_index = __ SmiTag(vector_index);
  Node* feedback_vector = __ LoadFeedbackVector();
  __ SetAccumulator(constructor_assembler.EmitFastNewClosure(
      shared, feedback_vector, vector_index, context));
  __ Dispatch();

  __ Bind(&call_runtime);
  {
    Node* tenured_raw =
        __ DecodeWordFromWord32<CreateClosureFlags::PretenuredBit>(flags);
    Node* tenured = __ SmiTag(tenured_raw);
    feedback_vector = __ LoadFeedbackVector();
    vector_index = __ BytecodeOperandIdx(1);
    vector_index = __ SmiTag(vector_index);
    Node* result =
        __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared,
                       feedback_vector, vector_index, tenured);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}
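
// Editorial illustration (not in the original source): a function literal
// such as `var f = function() {};` might lower to (operands schematic):
//
//   CreateClosure [shared_idx], [slot], #flags  ; flags carry the fast-path
//                                               ; and pretenure bits
//   Star r0
//
// The FastNewClosureBit selects the stub path above; otherwise the pretenure
// decision is decoded from the flags and passed to the runtime as a Smi.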

// CreateBlockContext <index>
//
// Creates a new block context with the scope info constant at |index| and the
// closure in the accumulator.
void Interpreter::DoCreateBlockContext(InterpreterAssembler* assembler) {
  Node* index = __ BytecodeOperandIdx(0);
  Node* scope_info = __ LoadConstantPoolEntry(index);
  Node* closure = __ GetAccumulator();
  Node* context = __ GetContext();
  __ SetAccumulator(
      __ CallRuntime(Runtime::kPushBlockContext, context, scope_info, closure));
  __ Dispatch();
}

// CreateCatchContext <exception> <name_idx> <scope_info_idx>
//
// Creates a new context for a catch block with the |exception| in a register,
// the variable name at |name_idx|, the ScopeInfo at |scope_info_idx|, and the
// closure in the accumulator.
void Interpreter::DoCreateCatchContext(InterpreterAssembler* assembler) {
  Node* exception_reg = __ BytecodeOperandReg(0);
  Node* exception = __ LoadRegister(exception_reg);
  Node* name_idx = __ BytecodeOperandIdx(1);
  Node* name = __ LoadConstantPoolEntry(name_idx);
  Node* scope_info_idx = __ BytecodeOperandIdx(2);
  Node* scope_info = __ LoadConstantPoolEntry(scope_info_idx);
  Node* closure = __ GetAccumulator();
  Node* context = __ GetContext();
  __ SetAccumulator(__ CallRuntime(Runtime::kPushCatchContext, context, name,
                                   exception, scope_info, closure));
  __ Dispatch();
}
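
// Editorial illustration (not in the original source): for
// `try { ... } catch (e) { ... }` the generator stores the pending exception
// in a register, loads the current closure into the accumulator, and then
// might emit (operands schematic):
//
//   CreateCatchContext r_exception, [name_idx], [scope_info_idx]
//   PushContext r_ctx   ; enter the new catch context for the handler body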

// CreateFunctionContext <slots>
//
// Creates a new context with number of |slots| for the function closure.
void Interpreter::DoCreateFunctionContext(InterpreterAssembler* assembler) {
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* slots = __ BytecodeOperandUImm(0);
  Node* context = __ GetContext();
  ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
  __ SetAccumulator(constructor_assembler.EmitFastNewFunctionContext(
      closure, slots, context, FUNCTION_SCOPE));
  __ Dispatch();
}

// CreateEvalContext <slots>
//
// Creates a new context with number of |slots| for an eval closure.
void Interpreter::DoCreateEvalContext(InterpreterAssembler* assembler) {
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* slots = __ BytecodeOperandUImm(0);
  Node* context = __ GetContext();
  ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
  __ SetAccumulator(constructor_assembler.EmitFastNewFunctionContext(
      closure, slots, context, EVAL_SCOPE));
  __ Dispatch();
}

// CreateWithContext <register> <scope_info_idx>
//
// Creates a new context with the ScopeInfo at |scope_info_idx| for a
// with-statement with the object in |register| and the closure in the
// accumulator.
void Interpreter::DoCreateWithContext(InterpreterAssembler* assembler) {
  Node* reg_index = __ BytecodeOperandReg(0);
  Node* object = __ LoadRegister(reg_index);
  Node* scope_info_idx = __ BytecodeOperandIdx(1);
  Node* scope_info = __ LoadConstantPoolEntry(scope_info_idx);
  Node* closure = __ GetAccumulator();
  Node* context = __ GetContext();
  __ SetAccumulator(__ CallRuntime(Runtime::kPushWithContext, context, object,
                                   scope_info, closure));
  __ Dispatch();
}

// CreateMappedArguments
//
// Creates a new mapped arguments object.
void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();

  Label if_duplicate_parameters(assembler, Label::kDeferred);
  Label if_not_duplicate_parameters(assembler);

  // Check if function has duplicate parameters.
  // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
  // duplicate parameters.
  Node* shared_info =
      __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
  Node* compiler_hints = __ LoadObjectField(
      shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset,
      MachineType::Uint8());
  Node* duplicate_parameters_bit = __ Int32Constant(
      1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
  Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
  __ Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);

  __ Bind(&if_not_duplicate_parameters);
  {
    ArgumentsBuiltinsAssembler constructor_assembler(assembler->state());
    Node* result =
        constructor_assembler.EmitFastNewSloppyArguments(context, closure);
    __ SetAccumulator(result);
    __ Dispatch();
  }

  __ Bind(&if_duplicate_parameters);
  {
    Node* result =
        __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}
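
// Editorial illustration (not in the original source): a sloppy-mode function
// that mentions `arguments`, e.g. `function f(a) { return arguments[0]; }`,
// allocates its arguments object in the prologue with:
//
//   CreateMappedArguments
//   Star r0
//
// "Mapped" means the indexed slots alias the formal parameters; strict-mode
// functions (and functions with non-simple parameter lists) get
// CreateUnmappedArguments instead.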

// CreateUnmappedArguments
//
// Creates a new unmapped arguments object.
void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  Node* closure = __ LoadRegister(Register::function_closure());
  ArgumentsBuiltinsAssembler builtins_assembler(assembler->state());
  Node* result =
      builtins_assembler.EmitFastNewStrictArguments(context, closure);
  __ SetAccumulator(result);
  __ Dispatch();
}

// CreateRestParameter
//
// Creates a new rest parameter array.
void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
  Node* closure = __ LoadRegister(Register::function_closure());
  Node* context = __ GetContext();
  ArgumentsBuiltinsAssembler builtins_assembler(assembler->state());
  Node* result = builtins_assembler.EmitFastNewRestParameter(context, closure);
  __ SetAccumulator(result);
  __ Dispatch();
}

// StackCheck
//
// Performs a stack guard check.
void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
  Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);

  Node* interrupt = __ StackCheckTriggeredInterrupt();
  __ Branch(interrupt, &stack_check_interrupt, &ok);

  __ Bind(&ok);
  __ Dispatch();

  __ Bind(&stack_check_interrupt);
  {
    Node* context = __ GetContext();
    __ CallRuntime(Runtime::kStackGuard, context);
    __ Dispatch();
  }
}

// SetPendingMessage
//
// Sets the pending message to the value in the accumulator, and returns the
// previous pending message in the accumulator.
void Interpreter::DoSetPendingMessage(InterpreterAssembler* assembler) {
  Node* pending_message = __ ExternalConstant(
      ExternalReference::address_of_pending_message_obj(isolate_));
  Node* previous_message =
      __ Load(MachineType::TaggedPointer(), pending_message);
  Node* new_message = __ GetAccumulator();
  __ StoreNoWriteBarrier(MachineRepresentation::kTaggedPointer, pending_message,
                         new_message);
  __ SetAccumulator(previous_message);
  __ Dispatch();
}

// Throw
//
// Throws the exception in the accumulator.
void Interpreter::DoThrow(InterpreterAssembler* assembler) {
  Node* exception = __ GetAccumulator();
  Node* context = __ GetContext();
  __ CallRuntime(Runtime::kThrow, context, exception);
  // We shouldn't ever return from a throw.
  __ Abort(kUnexpectedReturnFromThrow);
}

// ReThrow
//
// Re-throws the exception in the accumulator.
void Interpreter::DoReThrow(InterpreterAssembler* assembler) {
  Node* exception = __ GetAccumulator();
  Node* context = __ GetContext();
  __ CallRuntime(Runtime::kReThrow, context, exception);
  // We shouldn't ever return from a throw.
  __ Abort(kUnexpectedReturnFromThrow);
}

// Return
//
// Return the value in the accumulator.
void Interpreter::DoReturn(InterpreterAssembler* assembler) {
  __ UpdateInterruptBudgetOnReturn();
  Node* accumulator = __ GetAccumulator();
  __ Return(accumulator);
}

// Debugger
//
// Call runtime to handle debugger statement.
void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
  Node* context = __ GetContext();
  __ CallStub(CodeFactory::HandleDebuggerStatement(isolate_), context);
  __ Dispatch();
}

// DebugBreak
//
// Call runtime to handle a debug break.
#define DEBUG_BREAK(Name, ...)                                                \
  void Interpreter::Do##Name(InterpreterAssembler* assembler) {               \
    Node* context = __ GetContext();                                          \
    Node* accumulator = __ GetAccumulator();                                  \
    Node* original_handler =                                                  \
        __ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
    __ MaybeDropFrames(context);                                              \
    __ DispatchToBytecodeHandler(original_handler);                           \
  }
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK
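
// Editorial illustration (not in the original source): for each entry in
// DEBUG_BREAK_BYTECODE_LIST the macro above stamps out one handler; e.g. a
// list entry named DebugBreak1 would expand to:
//
//   void Interpreter::DoDebugBreak1(InterpreterAssembler* assembler) {
//     Node* context = __ GetContext();
//     Node* accumulator = __ GetAccumulator();
//     Node* original_handler =
//         __ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator);
//     __ MaybeDropFrames(context);
//     __ DispatchToBytecodeHandler(original_handler);
//   }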

void Interpreter::BuildForInPrepareResult(Node* output_register,
                                          Node* cache_type, Node* cache_array,
                                          Node* cache_length,
                                          InterpreterAssembler* assembler) {
  __ StoreRegister(cache_type, output_register);
  output_register = __ NextRegister(output_register);
  __ StoreRegister(cache_array, output_register);
  output_register = __ NextRegister(output_register);
  __ StoreRegister(cache_length, output_register);
}

// ForInPrepare <receiver> <cache_info_triple>
//
// Returns state for for..in loop execution based on the object in the register
// |receiver|. The object must not be null or undefined and must have been
// converted to a receiver already.
// The result is output in registers |cache_info_triple| to
// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
// and cache_length respectively.
void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
  Node* object_register = __ BytecodeOperandReg(0);
  Node* output_register = __ BytecodeOperandReg(1);
  Node* receiver = __ LoadRegister(object_register);
  Node* context = __ GetContext();

  Node* cache_type;
  Node* cache_array;
  Node* cache_length;
  Label call_runtime(assembler, Label::kDeferred),
      nothing_to_iterate(assembler, Label::kDeferred);

  ObjectBuiltinsAssembler object_assembler(assembler->state());
  std::tie(cache_type, cache_array, cache_length) =
      object_assembler.EmitForInPrepare(receiver, context, &call_runtime,
                                        &nothing_to_iterate);

  BuildForInPrepareResult(output_register, cache_type, cache_array,
                          cache_length, assembler);
  __ Dispatch();

  __ Bind(&call_runtime);
  {
    Node* result_triple =
        __ CallRuntime(Runtime::kForInPrepare, context, receiver);
    Node* cache_type = __ Projection(0, result_triple);
    Node* cache_array = __ Projection(1, result_triple);
    Node* cache_length = __ Projection(2, result_triple);
    BuildForInPrepareResult(output_register, cache_type, cache_array,
                            cache_length, assembler);
    __ Dispatch();
  }
  __ Bind(&nothing_to_iterate);
  {
    // Receiver is null or undefined or descriptors are zero length.
    Node* zero = __ SmiConstant(0);
    BuildForInPrepareResult(output_register, zero, zero, zero, assembler);
    __ Dispatch();
  }
}

// ForInNext <receiver> <index> <cache_info_pair> <feedback_slot>
//
// Returns the next enumerable property in the accumulator.
void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
  Node* receiver_reg = __ BytecodeOperandReg(0);
  Node* receiver = __ LoadRegister(receiver_reg);
  Node* index_reg = __ BytecodeOperandReg(1);
  Node* index = __ LoadRegister(index_reg);
  Node* cache_type_reg = __ BytecodeOperandReg(2);
  Node* cache_type = __ LoadRegister(cache_type_reg);
  Node* cache_array_reg = __ NextRegister(cache_type_reg);
  Node* cache_array = __ LoadRegister(cache_array_reg);

  // Load the next key from the enumeration array.
  Node* key = __ LoadFixedArrayElement(cache_array, index, 0,
                                       CodeStubAssembler::SMI_PARAMETERS);

  // Check if we can use the for-in fast path potentially using the enum cache.
  Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
  Node* receiver_map = __ LoadMap(receiver);
  __ Branch(__ WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
  __ Bind(&if_fast);
  {
    // Enum cache in use for {receiver}, the {key} is definitely valid.
    __ SetAccumulator(key);
    __ Dispatch();
  }
  __ Bind(&if_slow);
  {
    // Record the fact that we hit the for-in slow path.
    Node* vector_index = __ BytecodeOperandIdx(3);
    Node* feedback_vector = __ LoadFeedbackVector();
    Node* megamorphic_sentinel =
        __ HeapConstant(FeedbackVector::MegamorphicSentinel(isolate_));
    __ StoreFixedArrayElement(feedback_vector, vector_index,
                              megamorphic_sentinel, SKIP_WRITE_BARRIER);

    // Need to filter the {key} for the {receiver}.
    Node* context = __ GetContext();
    Callable callable = CodeFactory::ForInFilter(assembler->isolate());
    Node* result = __ CallStub(callable, context, key, receiver);
    __ SetAccumulator(result);
    __ Dispatch();
  }
}

// ForInContinue <index> <cache_length>
//
// Returns false if the end of the enumerable properties has been reached.
void Interpreter::DoForInContinue(InterpreterAssembler* assembler) {
  Node* index_reg = __ BytecodeOperandReg(0);
  Node* index = __ LoadRegister(index_reg);
  Node* cache_length_reg = __ BytecodeOperandReg(1);
  Node* cache_length = __ LoadRegister(cache_length_reg);

  // Check if {index} is at {cache_length} already.
  Label if_true(assembler), if_false(assembler), end(assembler);
  __ Branch(__ WordEqual(index, cache_length), &if_true, &if_false);
  __ Bind(&if_true);
  {
    __ SetAccumulator(__ BooleanConstant(false));
    __ Goto(&end);
  }
  __ Bind(&if_false);
  {
    __ SetAccumulator(__ BooleanConstant(true));
    __ Goto(&end);
  }
  __ Bind(&end);
  __ Dispatch();
}

// ForInStep <index>
//
// Increments the loop counter in register |index| and stores the result
// in the accumulator.
void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
  Node* index_reg = __ BytecodeOperandReg(0);
  Node* index = __ LoadRegister(index_reg);
  Node* one = __ SmiConstant(Smi::FromInt(1));
  Node* result = __ SmiAdd(index, one);
  __ SetAccumulator(result);
  __ Dispatch();
}
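
// Editorial illustration (not in the original source): taken together, the
// four for-in bytecodes above give `for (var k in o) body;` a shape roughly
// like (registers, offsets, and feedback slot schematic):
//
//   ForInPrepare r_o, r_triple       ; r_triple..+2 = type/array/length
//   LdaZero
//   Star r_index
//   loop:
//     ForInContinue r_index, r_triple+2         ; false once index == length
//     JumpIfFalse [+exit]
//     ForInNext r_o, r_index, r_triple, [slot]  ; next key, or undefined
//     JumpIfUndefined [+step]                   ; key was filtered out
//     Star r_k
//     <body>
//   step:
//     ForInStep r_index
//     Star r_index
//     JumpLoop [-loop], [0]
//   exit: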

// Wide
//
// Prefix bytecode indicating next bytecode has wide (16-bit) operands.
void Interpreter::DoWide(InterpreterAssembler* assembler) {
  __ DispatchWide(OperandScale::kDouble);
}

// ExtraWide
//
// Prefix bytecode indicating next bytecode has extra-wide (32-bit) operands.
void Interpreter::DoExtraWide(InterpreterAssembler* assembler) {
  __ DispatchWide(OperandScale::kQuadruple);
}
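
// Editorial illustration (not in the original source): the prefixes rescale
// the operands of the immediately following bytecode, so storing to a
// register beyond the 8-bit operand range, say register 300, is encoded
// roughly as:
//
//   Wide Star r300   ; one logical bytecode; the Star operand is 16 bits wide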

// Illegal
//
// An invalid bytecode aborting execution if dispatched.
void Interpreter::DoIllegal(InterpreterAssembler* assembler) {
  __ Abort(kInvalidBytecode);
}

// Nop
//
// No operation.
void Interpreter::DoNop(InterpreterAssembler* assembler) { __ Dispatch(); }

// SuspendGenerator <generator>
//
// Exports the register file and stores it into the generator.  Also stores the
// current context, the state given in the accumulator, and the current bytecode
// offset (for debugging purposes) into the generator.
void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
  Node* generator_reg = __ BytecodeOperandReg(0);
  Node* generator = __ LoadRegister(generator_reg);

  Label if_stepping(assembler, Label::kDeferred), ok(assembler);
  Node* step_action_address = __ ExternalConstant(
      ExternalReference::debug_last_step_action_address(isolate_));
  Node* step_action = __ Load(MachineType::Int8(), step_action_address);
  STATIC_ASSERT(StepIn > StepNext);
  STATIC_ASSERT(LastStepAction == StepIn);
  Node* step_next = __ Int32Constant(StepNext);
  __ Branch(__ Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
  __ Bind(&ok);

  Node* array =
      __ LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset);
  Node* context = __ GetContext();
  Node* state = __ GetAccumulator();

  __ ExportRegisterFile(array);
  __ StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
  __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, state);

  Node* offset = __ SmiTag(__ BytecodeOffset());
  __ StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset,
                      offset);

  __ Dispatch();

  __ Bind(&if_stepping);
  {
    Node* context = __ GetContext();
    __ CallRuntime(Runtime::kDebugRecordGenerator, context, generator);
    __ Goto(&ok);
  }
}
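
// Editorial illustration (not in the original source): at a `yield` the
// generator stores its live state and returns to the caller, roughly
// (registers and suspend id schematic):
//
//   <allocate the {value, done: false} result into r_result>
//   LdaSmi [suspend_id]           ; the state recorded in the accumulator
//   SuspendGenerator r_generator  ; spill registers, context, state, offset
//   Ldar r_result
//   Return                        ; deliver the iterator result to the caller
//
// On resumption, a dispatch in the function prologue uses ResumeGenerator
// (below) to restore the register file and recover the stored state.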

// ResumeGenerator <generator>
//
// Imports the register file stored in the generator. Also loads the
// generator's state and stores it in the accumulator, before overwriting it
// with kGeneratorExecuting.
void Interpreter::DoResumeGenerator(InterpreterAssembler* assembler) {
  Node* generator_reg = __ BytecodeOperandReg(0);
  Node* generator = __ LoadRegister(generator_reg);

  __ ImportRegisterFile(
      __ LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset));

  Node* old_state =
      __ LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
  Node* new_state = __ Int32Constant(JSGeneratorObject::kGeneratorExecuting);
  __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
                      __ SmiTag(new_state));
  __ SetAccumulator(old_state);

  __ Dispatch();
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8