1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/interpreter/interpreter-assembler.h"
6 
7 #include <limits>
8 #include <ostream>
9 
10 #include "src/code-factory.h"
11 #include "src/frames.h"
12 #include "src/interface-descriptors.h"
13 #include "src/interpreter/bytecodes.h"
14 #include "src/interpreter/interpreter.h"
15 #include "src/machine-type.h"
16 #include "src/macro-assembler.h"
17 #include "src/objects-inl.h"
18 #include "src/zone/zone.h"
19 
20 namespace v8 {
21 namespace internal {
22 namespace interpreter {
23 
24 using compiler::CodeAssemblerState;
25 using compiler::Node;
26 template <class T>
27 using TNode = compiler::TNode<T>;
28 
29 InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
30                                            Bytecode bytecode,
31                                            OperandScale operand_scale)
32     : CodeStubAssembler(state),
33       bytecode_(bytecode),
34       operand_scale_(operand_scale),
35       VARIABLE_CONSTRUCTOR(interpreted_frame_pointer_,
36                            MachineType::PointerRepresentation()),
37       VARIABLE_CONSTRUCTOR(
38           bytecode_array_, MachineRepresentation::kTagged,
39           Parameter(InterpreterDispatchDescriptor::kBytecodeArray)),
40       VARIABLE_CONSTRUCTOR(
41           bytecode_offset_, MachineType::PointerRepresentation(),
42           Parameter(InterpreterDispatchDescriptor::kBytecodeOffset)),
43       VARIABLE_CONSTRUCTOR(
44           dispatch_table_, MachineType::PointerRepresentation(),
45           Parameter(InterpreterDispatchDescriptor::kDispatchTable)),
46       VARIABLE_CONSTRUCTOR(
47           accumulator_, MachineRepresentation::kTagged,
48           Parameter(InterpreterDispatchDescriptor::kAccumulator)),
49       accumulator_use_(AccumulatorUse::kNone),
50       made_call_(false),
51       reloaded_frame_ptr_(false),
52       bytecode_array_valid_(true),
53       disable_stack_check_across_call_(false),
54       stack_pointer_before_call_(nullptr) {
55 #ifdef V8_TRACE_IGNITION
56   TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
57 #endif
58   RegisterCallGenerationCallbacks([this] { CallPrologue(); },
59                                   [this] { CallEpilogue(); });
60 
61   // Save the bytecode offset immediately if the bytecode will make a call
62   // along the critical path, or if it is a return bytecode.
63   if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
64       Bytecodes::Returns(bytecode)) {
65     SaveBytecodeOffset();
66   }
67 }
68 
69 InterpreterAssembler::~InterpreterAssembler() {
70   // If the following check fails the handler does not use the
71   // accumulator in the way described in the bytecode definitions in
72   // bytecodes.h.
73   DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
74   UnregisterCallGenerationCallbacks();
75 }
76 
77 Node* InterpreterAssembler::GetInterpretedFramePointer() {
78   if (!interpreted_frame_pointer_.IsBound()) {
79     interpreted_frame_pointer_.Bind(LoadParentFramePointer());
80   } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
81              !reloaded_frame_ptr_) {
82     interpreted_frame_pointer_.Bind(LoadParentFramePointer());
83     reloaded_frame_ptr_ = true;
84   }
85   return interpreted_frame_pointer_.value();
86 }
87 
88 Node* InterpreterAssembler::BytecodeOffset() {
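  // The offset may be clobbered by a call made along the critical path. It was
  // spilled to the interpreter frame in the constructor (see SaveBytecodeOffset),
  // so reload it here if the variable still holds the incoming parameter, i.e.
  // it has not been rebound since entry.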
89   if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
90       (bytecode_offset_.value() ==
91        Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
92     bytecode_offset_.Bind(ReloadBytecodeOffset());
93   }
94   return bytecode_offset_.value();
95 }
96 
97 Node* InterpreterAssembler::ReloadBytecodeOffset() {
98   Node* offset = LoadAndUntagRegister(Register::bytecode_offset());
99   if (operand_scale() != OperandScale::kSingle) {
100     // Add one to the offset such that it points to the actual bytecode rather
101     // than the Wide / ExtraWide prefix bytecode.
102     offset = IntPtrAdd(offset, IntPtrConstant(1));
103   }
104   return offset;
105 }
106 
107 void InterpreterAssembler::SaveBytecodeOffset() {
108   Node* offset = BytecodeOffset();
109   if (operand_scale() != OperandScale::kSingle) {
110     // Subtract one from the offset such that it points to the Wide / ExtraWide
111     // prefix bytecode.
112     offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
113   }
114   StoreAndTagRegister(offset, Register::bytecode_offset());
115 }
116 
117 Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
118   // Force a re-load of the bytecode array after every call in case the debugger
119   // has been activated.
120   if (!bytecode_array_valid_) {
121     bytecode_array_.Bind(LoadRegister(Register::bytecode_array()));
122     bytecode_array_valid_ = true;
123   }
124   return bytecode_array_.value();
125 }
126 
127 Node* InterpreterAssembler::DispatchTableRawPointer() {
128   if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
129       (dispatch_table_.value() ==
130        Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
131     dispatch_table_.Bind(ExternalConstant(
132         ExternalReference::interpreter_dispatch_table_address(isolate())));
133   }
134   return dispatch_table_.value();
135 }
136 
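// Unlike GetAccumulator() below, this accessor does not record a read in
// accumulator_use_ and does not apply speculation poisoning to the value.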
137 Node* InterpreterAssembler::GetAccumulatorUnchecked() {
138   return accumulator_.value();
139 }
140 
141 Node* InterpreterAssembler::GetAccumulator() {
142   DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
143   accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
144   return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
145 }
146 
147 void InterpreterAssembler::SetAccumulator(Node* value) {
148   DCHECK(Bytecodes::WritesAccumulator(bytecode_));
149   accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
150   accumulator_.Bind(value);
151 }
152 
153 Node* InterpreterAssembler::GetContext() {
154   return LoadRegister(Register::current_context());
155 }
156 
157 void InterpreterAssembler::SetContext(Node* value) {
158   StoreRegister(value, Register::current_context());
159 }
160 
161 Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
162   Variable cur_context(this, MachineRepresentation::kTaggedPointer);
163   cur_context.Bind(context);
164 
165   Variable cur_depth(this, MachineRepresentation::kWord32);
166   cur_depth.Bind(depth);
167 
168   Label context_found(this);
169 
170   Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
171   Label context_search(this, 2, context_search_loop_variables);
172 
173   // Fast path if the depth is 0.
174   Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);
175 
176   // Loop until the depth is 0.
177   BIND(&context_search);
178   {
179     cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
180     cur_context.Bind(
181         LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
182 
183     Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
184            &context_search);
185   }
186 
187   BIND(&context_found);
188   return cur_context.value();
189 }
190 
191 void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
192                                                               Node* depth,
193                                                               Label* target) {
194   Variable cur_context(this, MachineRepresentation::kTaggedPointer);
195   cur_context.Bind(context);
196 
197   Variable cur_depth(this, MachineRepresentation::kWord32);
198   cur_depth.Bind(depth);
199 
200   Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
201   Label context_search(this, 2, context_search_loop_variables);
202 
203   // Loop until the depth is 0.
204   Goto(&context_search);
205   BIND(&context_search);
206   {
207     // TODO(leszeks): We only need to do this check if the context had a sloppy
208     // eval, we could pass in a context chain bitmask to figure out which
209     // contexts actually need to be checked.
210 
211     Node* extension_slot =
212         LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
213 
214     // Jump to the target if the extension slot is not a hole.
215     GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);
216 
217     cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
218     cur_context.Bind(
219         LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
220 
221     GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
222            &context_search);
223   }
224 }
225 
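// Register locations and loads derived from bytecode operands are poisoned on
// speculation (WordPoisonOnSpeculation / LoadSensitivity::kCritical) as a
// mitigation against Spectre-style speculative side channels.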
226 Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
227   return WordPoisonOnSpeculation(
228       IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)));
229 }
230 
231 Node* InterpreterAssembler::RegisterLocation(Register reg) {
232   return RegisterLocation(IntPtrConstant(reg.ToOperand()));
233 }
234 
235 Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
236   return TimesPointerSize(index);
237 }
238 
239 Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
240   return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
241               RegisterFrameOffset(reg_index), LoadSensitivity::kCritical);
242 }
243 
244 Node* InterpreterAssembler::LoadRegister(Register reg) {
245   return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
246               IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
247 }
248 
249 Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
250   return LoadAndUntagSmi(GetInterpretedFramePointer(), reg.ToOperand()
251                                                            << kPointerSizeLog2);
252 }
253 
254 Node* InterpreterAssembler::LoadRegisterAtOperandIndex(int operand_index) {
255   return LoadRegister(
256       BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
257 }
258 
259 std::pair<Node*, Node*> InterpreterAssembler::LoadRegisterPairAtOperandIndex(
260     int operand_index) {
261   DCHECK_EQ(OperandType::kRegPair,
262             Bytecodes::GetOperandType(bytecode_, operand_index));
263   Node* first_reg_index =
264       BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
265   Node* second_reg_index = NextRegister(first_reg_index);
266   return std::make_pair(LoadRegister(first_reg_index),
267                         LoadRegister(second_reg_index));
268 }
269 
270 InterpreterAssembler::RegListNodePair
271 InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
272   DCHECK(Bytecodes::IsRegisterListOperandType(
273       Bytecodes::GetOperandType(bytecode_, operand_index)));
274   DCHECK_EQ(OperandType::kRegCount,
275             Bytecodes::GetOperandType(bytecode_, operand_index + 1));
276   Node* base_reg = RegisterLocation(
277       BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
278   Node* reg_count = BytecodeOperandCount(operand_index + 1);
279   return RegListNodePair(base_reg, reg_count);
280 }
281 
282 Node* InterpreterAssembler::LoadRegisterFromRegisterList(
283     const RegListNodePair& reg_list, int index) {
284   Node* location = RegisterLocationInRegisterList(reg_list, index);
285   // Location is already poisoned on speculation, so no need to poison here.
286   return Load(MachineType::AnyTagged(), location);
287 }
288 
289 Node* InterpreterAssembler::RegisterLocationInRegisterList(
290     const RegListNodePair& reg_list, int index) {
291   CSA_ASSERT(this,
292              Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
293   Node* offset = RegisterFrameOffset(IntPtrConstant(index));
294   // Register indexes are negative, so subtract index from base location to get
295   // location.
296   return IntPtrSub(reg_list.base_reg_location(), offset);
297 }
298 
299 void InterpreterAssembler::StoreRegister(Node* value, Register reg) {
300   StoreNoWriteBarrier(
301       MachineRepresentation::kTagged, GetInterpretedFramePointer(),
302       IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
303 }
304 
305 void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
306   StoreNoWriteBarrier(MachineRepresentation::kTagged,
307                       GetInterpretedFramePointer(),
308                       RegisterFrameOffset(reg_index), value);
309 }
310 
311 void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) {
312   int offset = reg.ToOperand() << kPointerSizeLog2;
313   StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
314 }
315 
316 void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value,
317                                                        int operand_index) {
318   StoreRegister(value,
319                 BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
320 }
321 
322 void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1,
323                                                            Node* value2,
324                                                            int operand_index) {
325   DCHECK_EQ(OperandType::kRegOutPair,
326             Bytecodes::GetOperandType(bytecode_, operand_index));
327   Node* first_reg_index =
328       BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
329   StoreRegister(value1, first_reg_index);
330   Node* second_reg_index = NextRegister(first_reg_index);
331   StoreRegister(value2, second_reg_index);
332 }
333 
334 void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
335     Node* value1, Node* value2, Node* value3, int operand_index) {
336   DCHECK_EQ(OperandType::kRegOutTriple,
337             Bytecodes::GetOperandType(bytecode_, operand_index));
338   Node* first_reg_index =
339       BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
340   StoreRegister(value1, first_reg_index);
341   Node* second_reg_index = NextRegister(first_reg_index);
342   StoreRegister(value2, second_reg_index);
343   Node* third_reg_index = NextRegister(second_reg_index);
344   StoreRegister(value3, third_reg_index);
345 }
346 
347 Node* InterpreterAssembler::NextRegister(Node* reg_index) {
348   // Register indexes are negative, so the next index is minus one.
349   return IntPtrAdd(reg_index, IntPtrConstant(-1));
350 }
351 
352 Node* InterpreterAssembler::OperandOffset(int operand_index) {
353   return IntPtrConstant(
354       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
355 }
356 
357 Node* InterpreterAssembler::BytecodeOperandUnsignedByte(
358     int operand_index, LoadSensitivity needs_poisoning) {
359   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
360   DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
361                                     bytecode_, operand_index, operand_scale()));
362   Node* operand_offset = OperandOffset(operand_index);
363   return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
364               IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning);
365 }
366 
367 Node* InterpreterAssembler::BytecodeOperandSignedByte(
368     int operand_index, LoadSensitivity needs_poisoning) {
369   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
370   DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
371                                     bytecode_, operand_index, operand_scale()));
372   Node* operand_offset = OperandOffset(operand_index);
373   return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
374               IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning);
375 }
376 
377 Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
378     int relative_offset, MachineType result_type,
379     LoadSensitivity needs_poisoning) {
380   static const int kMaxCount = 4;
381   DCHECK(!TargetSupportsUnalignedAccess());
382 
383   int count;
384   switch (result_type.representation()) {
385     case MachineRepresentation::kWord16:
386       count = 2;
387       break;
388     case MachineRepresentation::kWord32:
389       count = 4;
390       break;
391     default:
392       UNREACHABLE();
393       break;
394   }
395   MachineType msb_type =
396       result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();
397 
398 #if V8_TARGET_LITTLE_ENDIAN
399   const int kStep = -1;
400   int msb_offset = count - 1;
401 #elif V8_TARGET_BIG_ENDIAN
402   const int kStep = 1;
403   int msb_offset = 0;
404 #else
405 #error "Unknown Architecture"
406 #endif
407 
408   // Read the most significant byte into bytes[0] and then in order
409   // down to least significant in bytes[count - 1].
410   DCHECK_LE(count, kMaxCount);
411   Node* bytes[kMaxCount];
412   for (int i = 0; i < count; i++) {
413     MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
414     Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
415     Node* array_offset = IntPtrAdd(BytecodeOffset(), offset);
416     bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset,
417                     needs_poisoning);
418   }
419 
420   // Pack LSB to MSB.
421   Node* result = bytes[--count];
422   for (int i = 1; --count >= 0; i++) {
423     Node* shift = Int32Constant(i * kBitsPerByte);
424     Node* value = Word32Shl(bytes[count], shift);
425     result = Word32Or(value, result);
426   }
427   return result;
428 }
429 
430 Node* InterpreterAssembler::BytecodeOperandUnsignedShort(
431     int operand_index, LoadSensitivity needs_poisoning) {
432   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
433   DCHECK_EQ(
434       OperandSize::kShort,
435       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
436   int operand_offset =
437       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
438   if (TargetSupportsUnalignedAccess()) {
439     return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
440                 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
441                 needs_poisoning);
442   } else {
443     return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16(),
444                                         needs_poisoning);
445   }
446 }
447 
448 Node* InterpreterAssembler::BytecodeOperandSignedShort(
449     int operand_index, LoadSensitivity needs_poisoning) {
450   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
451   DCHECK_EQ(
452       OperandSize::kShort,
453       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
454   int operand_offset =
455       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
456   if (TargetSupportsUnalignedAccess()) {
457     return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
458                 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
459                 needs_poisoning);
460   } else {
461     return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16(),
462                                         needs_poisoning);
463   }
464 }
465 
466 Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(
467     int operand_index, LoadSensitivity needs_poisoning) {
468   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
469   DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
470                                     bytecode_, operand_index, operand_scale()));
471   int operand_offset =
472       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
473   if (TargetSupportsUnalignedAccess()) {
474     return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
475                 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
476                 needs_poisoning);
477   } else {
478     return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32(),
479                                         needs_poisoning);
480   }
481 }
482 
483 Node* InterpreterAssembler::BytecodeOperandSignedQuad(
484     int operand_index, LoadSensitivity needs_poisoning) {
485   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
486   DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
487                                     bytecode_, operand_index, operand_scale()));
488   int operand_offset =
489       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
490   if (TargetSupportsUnalignedAccess()) {
491     return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
492                 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
493                 needs_poisoning);
494   } else {
495     return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32(),
496                                         needs_poisoning);
497   }
498 }
499 
500 Node* InterpreterAssembler::BytecodeSignedOperand(
501     int operand_index, OperandSize operand_size,
502     LoadSensitivity needs_poisoning) {
503   DCHECK(!Bytecodes::IsUnsignedOperandType(
504       Bytecodes::GetOperandType(bytecode_, operand_index)));
505   switch (operand_size) {
506     case OperandSize::kByte:
507       return BytecodeOperandSignedByte(operand_index, needs_poisoning);
508     case OperandSize::kShort:
509       return BytecodeOperandSignedShort(operand_index, needs_poisoning);
510     case OperandSize::kQuad:
511       return BytecodeOperandSignedQuad(operand_index, needs_poisoning);
512     case OperandSize::kNone:
513       UNREACHABLE();
514   }
515   return nullptr;
516 }
517 
518 Node* InterpreterAssembler::BytecodeUnsignedOperand(
519     int operand_index, OperandSize operand_size,
520     LoadSensitivity needs_poisoning) {
521   DCHECK(Bytecodes::IsUnsignedOperandType(
522       Bytecodes::GetOperandType(bytecode_, operand_index)));
523   switch (operand_size) {
524     case OperandSize::kByte:
525       return BytecodeOperandUnsignedByte(operand_index, needs_poisoning);
526     case OperandSize::kShort:
527       return BytecodeOperandUnsignedShort(operand_index, needs_poisoning);
528     case OperandSize::kQuad:
529       return BytecodeOperandUnsignedQuad(operand_index, needs_poisoning);
530     case OperandSize::kNone:
531       UNREACHABLE();
532   }
533   return nullptr;
534 }
535 
536 Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
537   DCHECK_EQ(OperandType::kRegCount,
538             Bytecodes::GetOperandType(bytecode_, operand_index));
539   OperandSize operand_size =
540       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
541   return BytecodeUnsignedOperand(operand_index, operand_size);
542 }
543 
544 Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
545   DCHECK_EQ(OperandType::kFlag8,
546             Bytecodes::GetOperandType(bytecode_, operand_index));
547   OperandSize operand_size =
548       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
549   DCHECK_EQ(operand_size, OperandSize::kByte);
550   return BytecodeUnsignedOperand(operand_index, operand_size);
551 }
552 
553 Node* InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
554   DCHECK_EQ(OperandType::kUImm,
555             Bytecodes::GetOperandType(bytecode_, operand_index));
556   OperandSize operand_size =
557       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
558   return BytecodeUnsignedOperand(operand_index, operand_size);
559 }
560 
561 Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) {
562   return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
563 }
564 
565 Node* InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
566   return SmiFromInt32(BytecodeOperandUImm(operand_index));
567 }
568 
569 Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
570   DCHECK_EQ(OperandType::kImm,
571             Bytecodes::GetOperandType(bytecode_, operand_index));
572   OperandSize operand_size =
573       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
574   return BytecodeSignedOperand(operand_index, operand_size);
575 }
576 
577 Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) {
578   return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
579 }
580 
581 Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
582   return SmiFromInt32(BytecodeOperandImm(operand_index));
583 }
584 
585 Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) {
586   DCHECK_EQ(OperandType::kIdx,
587             Bytecodes::GetOperandType(bytecode_, operand_index));
588   OperandSize operand_size =
589       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
590   return BytecodeUnsignedOperand(operand_index, operand_size);
591 }
592 
593 Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
594   return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index));
595 }
596 
597 Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
598   return SmiTag(BytecodeOperandIdx(operand_index));
599 }
600 
601 Node* InterpreterAssembler::BytecodeOperandConstantPoolIdx(
602     int operand_index, LoadSensitivity needs_poisoning) {
603   DCHECK_EQ(OperandType::kIdx,
604             Bytecodes::GetOperandType(bytecode_, operand_index));
605   OperandSize operand_size =
606       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
607   return ChangeUint32ToWord(
608       BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning));
609 }
610 
611 Node* InterpreterAssembler::BytecodeOperandReg(
612     int operand_index, LoadSensitivity needs_poisoning) {
613   DCHECK(Bytecodes::IsRegisterOperandType(
614       Bytecodes::GetOperandType(bytecode_, operand_index)));
615   OperandSize operand_size =
616       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
617   return ChangeInt32ToIntPtr(
618       BytecodeSignedOperand(operand_index, operand_size, needs_poisoning));
619 }
620 
621 Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
622   DCHECK_EQ(OperandType::kRuntimeId,
623             Bytecodes::GetOperandType(bytecode_, operand_index));
624   OperandSize operand_size =
625       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
626   DCHECK_EQ(operand_size, OperandSize::kShort);
627   return BytecodeUnsignedOperand(operand_index, operand_size);
628 }
629 
630 Node* InterpreterAssembler::BytecodeOperandNativeContextIndex(
631     int operand_index) {
632   DCHECK_EQ(OperandType::kNativeContextIndex,
633             Bytecodes::GetOperandType(bytecode_, operand_index));
634   OperandSize operand_size =
635       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
636   return ChangeUint32ToWord(
637       BytecodeUnsignedOperand(operand_index, operand_size));
638 }
639 
640 Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
641   DCHECK_EQ(OperandType::kIntrinsicId,
642             Bytecodes::GetOperandType(bytecode_, operand_index));
643   OperandSize operand_size =
644       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
645   DCHECK_EQ(operand_size, OperandSize::kByte);
646   return BytecodeUnsignedOperand(operand_index, operand_size);
647 }
648 
649 Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
650   TNode<FixedArray> constant_pool = CAST(LoadObjectField(
651       BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
652   return LoadFixedArrayElement(constant_pool, UncheckedCast<IntPtrT>(index),
653                                LoadSensitivity::kCritical);
654 }
655 
656 Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
657   return SmiUntag(LoadConstantPoolEntry(index));
658 }
659 
660 Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
661     int operand_index) {
662   Node* index =
663       BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe);
664   return LoadConstantPoolEntry(index);
665 }
666 
667 Node* InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
668     int operand_index) {
669   return SmiUntag(LoadConstantPoolEntryAtOperandIndex(operand_index));
670 }
671 
672 TNode<FeedbackVector> InterpreterAssembler::LoadFeedbackVector() {
673   TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
674   return CodeStubAssembler::LoadFeedbackVector(function);
675 }
676 
677 void InterpreterAssembler::CallPrologue() {
678   if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) {
679     // Bytecodes that make a call along the critical path save the bytecode
680     // offset in the bytecode handler's prologue. For other bytecodes, if
681     // there are multiple calls in the bytecode handler, you need to spill
682     // before each of them, unless SaveBytecodeOffset has explicitly been called
683     // in a path that dominates _all_ of those calls (which we don't track).
684     SaveBytecodeOffset();
685   }
686 
687   if (FLAG_debug_code && !disable_stack_check_across_call_) {
688     DCHECK_NULL(stack_pointer_before_call_);
689     stack_pointer_before_call_ = LoadStackPointer();
690   }
691   bytecode_array_valid_ = false;
692   made_call_ = true;
693 }
694 
695 void InterpreterAssembler::CallEpilogue() {
696   if (FLAG_debug_code && !disable_stack_check_across_call_) {
697     Node* stack_pointer_after_call = LoadStackPointer();
698     Node* stack_pointer_before_call = stack_pointer_before_call_;
699     stack_pointer_before_call_ = nullptr;
700     AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
701                         AbortReason::kUnexpectedStackPointer);
702   }
703 }
704 
705 void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
706                                               Node* slot_id) {
707   Comment("increment call count");
708   TNode<Smi> call_count =
709       CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kPointerSize));
710   // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
711   // count are used as flags. To increment the call count by 1 we hence
712   // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
713   Node* new_count = SmiAdd(
714       call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift));
715   // Count is Smi, so we don't need a write barrier.
716   StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
717                           SKIP_WRITE_BARRIER, kPointerSize);
718 }
719 
720 void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
721                                                    Node* feedback_vector,
722                                                    Node* slot_id) {
723   Label extra_checks(this, Label::kDeferred), done(this);
724 
725   // Check if we have monomorphic {target} feedback already.
726   TNode<MaybeObject> feedback =
727       LoadFeedbackVectorSlot(feedback_vector, slot_id);
728   Comment("check if monomorphic");
729   TNode<BoolT> is_monomorphic = IsWeakReferenceTo(feedback, CAST(target));
730   GotoIf(is_monomorphic, &done);
731 
732   // Check if it is a megamorphic {target}.
733   Comment("check if megamorphic");
734   Node* is_megamorphic = WordEqual(
735       feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
736   Branch(is_megamorphic, &done, &extra_checks);
737 
738   BIND(&extra_checks);
739   {
740     Label initialize(this), mark_megamorphic(this);
741 
742     Comment("check if weak reference");
743     Node* is_uninitialized = WordEqual(
744         feedback,
745         HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
746     GotoIf(is_uninitialized, &initialize);
747     CSA_ASSERT(this, IsWeakOrClearedHeapObject(feedback));
748 
749     // If the weak reference is cleared, we have a new chance to become
750     // monomorphic.
751     Comment("check if weak reference is cleared");
752     Branch(IsClearedWeakHeapObject(feedback), &initialize, &mark_megamorphic);
753 
754     BIND(&initialize);
755     {
756       // Check if {target} is a JSFunction in the current native context.
757       Comment("check if function in same native context");
758       GotoIf(TaggedIsSmi(target), &mark_megamorphic);
759       // Check if the {target} is a JSFunction or JSBoundFunction
760       // in the current native context.
761       VARIABLE(var_current, MachineRepresentation::kTagged, target);
762       Label loop(this, &var_current), done_loop(this);
763       Goto(&loop);
764       BIND(&loop);
765       {
766         Label if_boundfunction(this), if_function(this);
767         Node* current = var_current.value();
768         CSA_ASSERT(this, TaggedIsNotSmi(current));
769         Node* current_instance_type = LoadInstanceType(current);
770         GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
771                &if_boundfunction);
772         Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
773                &if_function, &mark_megamorphic);
774 
775         BIND(&if_function);
776         {
777           // Check that the JSFunction {current} is in the current native
778           // context.
779           Node* current_context =
780               LoadObjectField(current, JSFunction::kContextOffset);
781           Node* current_native_context = LoadNativeContext(current_context);
782           Branch(WordEqual(LoadNativeContext(context), current_native_context),
783                  &done_loop, &mark_megamorphic);
784         }
785 
786         BIND(&if_boundfunction);
787         {
788           // Continue with the [[BoundTargetFunction]] of {target}.
789           var_current.Bind(LoadObjectField(
790               current, JSBoundFunction::kBoundTargetFunctionOffset));
791           Goto(&loop);
792         }
793       }
794       BIND(&done_loop);
795       StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
796                                          CAST(target));
797       ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize");
798       Goto(&done);
799     }
800 
801     BIND(&mark_megamorphic);
802     {
803       // MegamorphicSentinel is an immortal immovable object so
804       // write-barrier is not needed.
805       Comment("transition to megamorphic");
806       DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
807       StoreFeedbackVectorSlot(
808           feedback_vector, slot_id,
809           HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
810           SKIP_WRITE_BARRIER);
811       ReportFeedbackUpdate(feedback_vector, slot_id,
812                            "Call:TransitionMegamorphic");
813       Goto(&done);
814     }
815   }
816 
817   BIND(&done);
818 }
819 
820 void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
821                                                Node* feedback_vector,
822                                                Node* slot_id) {
823   // Increment the call count.
824   IncrementCallCount(feedback_vector, slot_id);
825 
826   // Collect the callable {target} feedback.
827   CollectCallableFeedback(target, context, feedback_vector, slot_id);
828 }
829 
830 void InterpreterAssembler::CallJSAndDispatch(
831     Node* function, Node* context, const RegListNodePair& args,
832     ConvertReceiverMode receiver_mode) {
833   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
834   DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
835          bytecode_ == Bytecode::kInvokeIntrinsic);
836   DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
837 
838   Node* args_count;
839   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
840     // The receiver is implied, so it is not in the argument list.
841     args_count = args.reg_count();
842   } else {
843     // Subtract the receiver from the argument count.
844     Node* receiver_count = Int32Constant(1);
845     args_count = Int32Sub(args.reg_count(), receiver_count);
846   }
847 
848   Callable callable = CodeFactory::InterpreterPushArgsThenCall(
849       isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
850   Node* code_target = HeapConstant(callable.code());
851 
852   TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
853                                    args_count, args.base_reg_location(),
854                                    function);
855   // TailCallStubThenDispatch updates accumulator with result.
856   accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
857 }
858 
859 template <class... TArgs>
860 void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context,
861                                              Node* arg_count,
862                                              ConvertReceiverMode receiver_mode,
863                                              TArgs... args) {
864   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
865   DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
866          bytecode_ == Bytecode::kInvokeIntrinsic);
867   DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
868   Callable callable = CodeFactory::Call(isolate());
869   Node* code_target = HeapConstant(callable.code());
870 
871   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
872     // The first argument parameter (the receiver) is implied to be undefined.
873     TailCallStubThenBytecodeDispatch(
874         callable.descriptor(), code_target, context, function, arg_count,
875         static_cast<Node*>(UndefinedConstant()), args...);
876   } else {
877     TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
878                                      context, function, arg_count, args...);
879   }
880   // TailCallStubThenDispatch updates accumulator with result.
881   accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
882 }
883 
884 // Instantiate CallJSAndDispatch() for argument counts used by interpreter
885 // generator.
886 template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
887     Node* function, Node* context, Node* arg_count,
888     ConvertReceiverMode receiver_mode);
889 template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
890     Node* function, Node* context, Node* arg_count,
891     ConvertReceiverMode receiver_mode, Node*);
892 template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
893     Node* function, Node* context, Node* arg_count,
894     ConvertReceiverMode receiver_mode, Node*, Node*);
895 template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
896     Node* function, Node* context, Node* arg_count,
897     ConvertReceiverMode receiver_mode, Node*, Node*, Node*);
898 
899 void InterpreterAssembler::CallJSWithSpreadAndDispatch(
900     Node* function, Node* context, const RegListNodePair& args, Node* slot_id,
901     Node* feedback_vector) {
902   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
903   DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
904   CollectCallFeedback(function, context, feedback_vector, slot_id);
905   Comment("call using CallWithSpread builtin");
906   Callable callable = CodeFactory::InterpreterPushArgsThenCall(
907       isolate(), ConvertReceiverMode::kAny,
908       InterpreterPushArgsMode::kWithFinalSpread);
909   Node* code_target = HeapConstant(callable.code());
910 
911   Node* receiver_count = Int32Constant(1);
912   Node* args_count = Int32Sub(args.reg_count(), receiver_count);
913   TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
914                                    args_count, args.base_reg_location(),
915                                    function);
916   // TailCallStubThenDispatch updates accumulator with result.
917   accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
918 }
919 
920 Node* InterpreterAssembler::Construct(Node* target, Node* context,
921                                       Node* new_target,
922                                       const RegListNodePair& args,
923                                       Node* slot_id, Node* feedback_vector) {
924   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
925   VARIABLE(var_result, MachineRepresentation::kTagged);
926   VARIABLE(var_site, MachineRepresentation::kTagged);
927   Label extra_checks(this, Label::kDeferred), return_result(this, &var_result),
928       construct(this), construct_array(this, &var_site);
929 
930   // Increment the call count.
931   IncrementCallCount(feedback_vector, slot_id);
932 
933   // Check if we have monomorphic {new_target} feedback already.
934   TNode<MaybeObject> feedback =
935       LoadFeedbackVectorSlot(feedback_vector, slot_id);
936   Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct,
937          &extra_checks);
938 
939   BIND(&extra_checks);
940   {
941     Label check_allocation_site(this), check_initialized(this),
942         initialize(this), mark_megamorphic(this);
943 
944     // Check if it is a megamorphic {new_target}.
945     Comment("check if megamorphic");
946     Node* is_megamorphic = WordEqual(
947         feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
948     GotoIf(is_megamorphic, &construct);
949 
950     Comment("check if weak reference");
951     GotoIfNot(IsWeakOrClearedHeapObject(feedback), &check_allocation_site);
952 
953     // If the weak reference is cleared, we have a new chance to become
954     // monomorphic.
955     Comment("check if weak reference is cleared");
956     Branch(IsClearedWeakHeapObject(feedback), &initialize, &mark_megamorphic);
957 
958     BIND(&check_allocation_site);
959     {
960       // Check if it is an AllocationSite.
961       Comment("check if allocation site");
962       TNode<HeapObject> strong_feedback = CAST(feedback);
963       GotoIfNot(IsAllocationSite(strong_feedback), &check_initialized);
964 
965       // Make sure that {target} and {new_target} are the Array constructor.
966       Node* array_function = LoadContextElement(LoadNativeContext(context),
967                                                 Context::ARRAY_FUNCTION_INDEX);
968       GotoIfNot(WordEqual(target, array_function), &mark_megamorphic);
969       GotoIfNot(WordEqual(new_target, array_function), &mark_megamorphic);
970       var_site.Bind(strong_feedback);
971       Goto(&construct_array);
972     }
973 
974     BIND(&check_initialized);
975     {
976       // Check if it is uninitialized.
977       Comment("check if uninitialized");
978       Node* is_uninitialized =
979           WordEqual(feedback, LoadRoot(Heap::kuninitialized_symbolRootIndex));
980       Branch(is_uninitialized, &initialize, &mark_megamorphic);
981     }
982 
983     BIND(&initialize);
984     {
985       Comment("check if function in same native context");
986       GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
987       // Check if the {new_target} is a JSFunction or JSBoundFunction
988       // in the current native context.
989       VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
990       Label loop(this, &var_current), done_loop(this);
991       Goto(&loop);
992       BIND(&loop);
993       {
994         Label if_boundfunction(this), if_function(this);
995         Node* current = var_current.value();
996         CSA_ASSERT(this, TaggedIsNotSmi(current));
997         Node* current_instance_type = LoadInstanceType(current);
998         GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
999                &if_boundfunction);
1000         Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
1001                &if_function, &mark_megamorphic);
1002 
1003         BIND(&if_function);
1004         {
1005           // Check that the JSFunction {current} is in the current native
1006           // context.
1007           Node* current_context =
1008               LoadObjectField(current, JSFunction::kContextOffset);
1009           Node* current_native_context = LoadNativeContext(current_context);
1010           Branch(WordEqual(LoadNativeContext(context), current_native_context),
1011                  &done_loop, &mark_megamorphic);
1012         }
1013 
1014         BIND(&if_boundfunction);
1015         {
1016           // Continue with the [[BoundTargetFunction]] of {current}.
1017           var_current.Bind(LoadObjectField(
1018               current, JSBoundFunction::kBoundTargetFunctionOffset));
1019           Goto(&loop);
1020         }
1021       }
1022       BIND(&done_loop);
1023 
1024       // Create an AllocationSite if {target} and {new_target} refer
1025       // to the current native context's Array constructor.
1026       Label create_allocation_site(this), store_weak_reference(this);
1027       GotoIfNot(WordEqual(target, new_target), &store_weak_reference);
1028       Node* array_function = LoadContextElement(LoadNativeContext(context),
1029                                                 Context::ARRAY_FUNCTION_INDEX);
1030       Branch(WordEqual(target, array_function), &create_allocation_site,
1031              &store_weak_reference);
1032 
1033       BIND(&create_allocation_site);
1034       {
1035         var_site.Bind(CreateAllocationSiteInFeedbackVector(feedback_vector,
1036                                                            SmiTag(slot_id)));
1037         ReportFeedbackUpdate(feedback_vector, slot_id,
1038                              "Construct:CreateAllocationSite");
1039         Goto(&construct_array);
1040       }
1041 
1042       BIND(&store_weak_reference);
1043       {
1044         StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
1045                                            CAST(new_target));
1046         ReportFeedbackUpdate(feedback_vector, slot_id,
1047                              "Construct:StoreWeakReference");
1048         Goto(&construct);
1049       }
1050     }
1051 
1052     BIND(&mark_megamorphic);
1053     {
1054       // MegamorphicSentinel is an immortal immovable object so
1055       // write-barrier is not needed.
1056       Comment("transition to megamorphic");
1057       DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
1058       StoreFeedbackVectorSlot(
1059           feedback_vector, slot_id,
1060           HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
1061           SKIP_WRITE_BARRIER);
1062       ReportFeedbackUpdate(feedback_vector, slot_id,
1063                            "Construct:TransitionMegamorphic");
1064       Goto(&construct);
1065     }
1066   }
1067 
1068   BIND(&construct_array);
1069   {
1070     // TODO(bmeurer): Introduce a dedicated builtin to deal with the Array
1071     // constructor feedback collection inside of Ignition.
1072     Comment("call using ConstructArray builtin");
1073     Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
1074         isolate(), InterpreterPushArgsMode::kArrayFunction);
1075     Node* code_target = HeapConstant(callable.code());
1076     var_result.Bind(CallStub(callable.descriptor(), code_target, context,
1077                              args.reg_count(), new_target, target,
1078                              var_site.value(), args.base_reg_location()));
1079     Goto(&return_result);
1080   }
1081 
1082   BIND(&construct);
1083   {
1084     // TODO(bmeurer): Remove the generic type_info parameter from the Construct.
1085     Comment("call using Construct builtin");
1086     Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
1087         isolate(), InterpreterPushArgsMode::kOther);
1088     Node* code_target = HeapConstant(callable.code());
1089     var_result.Bind(CallStub(callable.descriptor(), code_target, context,
1090                              args.reg_count(), new_target, target,
1091                              UndefinedConstant(), args.base_reg_location()));
1092     Goto(&return_result);
1093   }
1094 
1095   BIND(&return_result);
1096   return var_result.value();
1097 }
1098 
1099 Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
1100                                                 Node* new_target,
1101                                                 const RegListNodePair& args,
1102                                                 Node* slot_id,
1103                                                 Node* feedback_vector) {
1104   // TODO(bmeurer): Unify this with the Construct bytecode feedback
1105   // above once we have a way to pass the AllocationSite to the Array
1106   // constructor _and_ spread the last argument at the same time.
1107   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
1108   Label extra_checks(this, Label::kDeferred), construct(this);
1109 
1110   // Increment the call count.
1111   IncrementCallCount(feedback_vector, slot_id);
1112 
1113   // Check if we have monomorphic {new_target} feedback already.
1114   TNode<MaybeObject> feedback =
1115       LoadFeedbackVectorSlot(feedback_vector, slot_id);
1116   Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct,
1117          &extra_checks);
1118 
1119   BIND(&extra_checks);
1120   {
1121     Label check_initialized(this), initialize(this), mark_megamorphic(this);
1122 
1123     // Check if it is a megamorphic {new_target}.
1124     Comment("check if megamorphic");
1125     Node* is_megamorphic = WordEqual(
1126         feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
1127     GotoIf(is_megamorphic, &construct);
1128 
1129     Comment("check if weak reference");
1130     GotoIfNot(IsWeakOrClearedHeapObject(feedback), &check_initialized);
1131 
1132     // If the weak reference is cleared, we have a new chance to become
1133     // monomorphic.
1134     Comment("check if weak reference is cleared");
1135     Branch(IsClearedWeakHeapObject(feedback), &initialize, &mark_megamorphic);
1136 
1137     BIND(&check_initialized);
1138     {
1139       // Check if it is uninitialized.
1140       Comment("check if uninitialized");
1141       Node* is_uninitialized =
1142           WordEqual(feedback, LoadRoot(Heap::kuninitialized_symbolRootIndex));
1143       Branch(is_uninitialized, &initialize, &mark_megamorphic);
1144     }
1145 
1146     BIND(&initialize);
1147     {
1148       Comment("check if function in same native context");
1149       GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
1150       // Check if the {new_target} is a JSFunction or JSBoundFunction
1151       // in the current native context.
1152       VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
1153       Label loop(this, &var_current), done_loop(this);
1154       Goto(&loop);
1155       BIND(&loop);
1156       {
1157         Label if_boundfunction(this), if_function(this);
1158         Node* current = var_current.value();
1159         CSA_ASSERT(this, TaggedIsNotSmi(current));
1160         Node* current_instance_type = LoadInstanceType(current);
1161         GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
1162                &if_boundfunction);
1163         Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
1164                &if_function, &mark_megamorphic);
1165 
1166         BIND(&if_function);
1167         {
1168           // Check that the JSFunction {current} is in the current native
1169           // context.
1170           Node* current_context =
1171               LoadObjectField(current, JSFunction::kContextOffset);
1172           Node* current_native_context = LoadNativeContext(current_context);
1173           Branch(WordEqual(LoadNativeContext(context), current_native_context),
1174                  &done_loop, &mark_megamorphic);
1175         }
1176 
1177         BIND(&if_boundfunction);
1178         {
1179           // Continue with the [[BoundTargetFunction]] of {current}.
1180           var_current.Bind(LoadObjectField(
1181               current, JSBoundFunction::kBoundTargetFunctionOffset));
1182           Goto(&loop);
1183         }
1184       }
1185       BIND(&done_loop);
1186       StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
1187                                          CAST(new_target));
1188       ReportFeedbackUpdate(feedback_vector, slot_id,
1189                            "ConstructWithSpread:Initialize");
1190       Goto(&construct);
1191     }
1192 
1193     BIND(&mark_megamorphic);
1194     {
1195       // MegamorphicSentinel is an immortal immovable object, so a write
1196       // barrier is not needed.
1197       Comment("transition to megamorphic");
1198       DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
1199       StoreFeedbackVectorSlot(
1200           feedback_vector, slot_id,
1201           HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
1202           SKIP_WRITE_BARRIER);
1203       ReportFeedbackUpdate(feedback_vector, slot_id,
1204                            "ConstructWithSpread:TransitionMegamorphic");
1205       Goto(&construct);
1206     }
1207   }
1208 
1209   BIND(&construct);
1210   Comment("call using ConstructWithSpread builtin");
1211   Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
1212       isolate(), InterpreterPushArgsMode::kWithFinalSpread);
1213   Node* code_target = HeapConstant(callable.code());
1214   return CallStub(callable.descriptor(), code_target, context, args.reg_count(),
1215                   new_target, target, UndefinedConstant(),
1216                   args.base_reg_location());
1217 }
1218 
1219 Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
1220                                          const RegListNodePair& args,
1221                                          int result_size) {
1222   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
1223   DCHECK(Bytecodes::IsCallRuntime(bytecode_));
1224   Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
1225   Node* code_target = HeapConstant(callable.code());
1226 
1227   // Get the function entry from the function id.
1228   Node* function_table = ExternalConstant(
1229       ExternalReference::runtime_function_table_address(isolate()));
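       // The table is a contiguous array of Runtime::Function records, so the
       // record for |function_id| starts at function_id * sizeof(Runtime::Function)
       // and its C entry point is read from the |entry| field below.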
1230   Node* function_offset =
1231       Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
1232   Node* function =
1233       IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
1234   Node* function_entry =
1235       Load(MachineType::Pointer(), function,
1236            IntPtrConstant(offsetof(Runtime::Function, entry)));
1237 
1238   return CallStubR(callable.descriptor(), result_size, code_target, context,
1239                    args.reg_count(), args.base_reg_location(), function_entry);
1240 }
1241 
1242 void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
1243   Comment("[ UpdateInterruptBudget");
1244 
1245   Node* budget_offset =
1246       IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
1247 
1248   // Assert that the weight is not negative (negative weights should be implemented
1249   // as backward updates).
1250   CSA_ASSERT(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));
1251 
1252   // Update budget by |weight| and check if it reaches zero.
1253   Variable new_budget(this, MachineRepresentation::kWord32);
1254   Node* old_budget =
1255       Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
1256   // Make sure we include the current bytecode in the budget calculation.
1257   Node* budget_after_bytecode =
1258       Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize()));
1259 
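       // Example: a backward jump with weight 100 in a three-byte jump bytecode
       // lowers the budget by 103; if the budget would drop below zero, the
       // interrupt runtime is called and the budget is reset to
       // Interpreter::InterruptBudget().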
1260   if (backward) {
1261     new_budget.Bind(Int32Sub(budget_after_bytecode, weight));
1262 
1263     Node* condition =
1264         Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
1265     Label ok(this), interrupt_check(this, Label::kDeferred);
1266     Branch(condition, &ok, &interrupt_check);
1267 
1268     // Perform interrupt and reset budget.
1269     BIND(&interrupt_check);
1270     {
1271       CallRuntime(Runtime::kInterrupt, GetContext());
1272       new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
1273       Goto(&ok);
1274     }
1275 
1276     BIND(&ok);
1277   } else {
1278     // For a forward jump, we know we only increase the interrupt budget, so
1279     // no need to check if it's below zero.
1280     new_budget.Bind(Int32Add(budget_after_bytecode, weight));
1281   }
1282 
1283   // Update budget.
1284   StoreNoWriteBarrier(MachineRepresentation::kWord32,
1285                       BytecodeArrayTaggedPointer(), budget_offset,
1286                       new_budget.value());
1287   Comment("] UpdateInterruptBudget");
1288 }
1289 
1290 Node* InterpreterAssembler::Advance() { return Advance(CurrentBytecodeSize()); }
1291 
1292 Node* InterpreterAssembler::Advance(int delta) {
1293   return Advance(IntPtrConstant(delta));
1294 }
1295 
1296 Node* InterpreterAssembler::Advance(Node* delta, bool backward) {
1297 #ifdef V8_TRACE_IGNITION
1298   TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
1299 #endif
1300   Node* next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
1301                                : IntPtrAdd(BytecodeOffset(), delta);
1302   bytecode_offset_.Bind(next_offset);
1303   return next_offset;
1304 }
1305 
1306 Node* InterpreterAssembler::Jump(Node* delta, bool backward) {
1307   DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
1308 
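       // A jump updates the interrupt budget by the jump distance (decreasing it
       // for backward jumps) and then dispatches directly to the handler of the
       // target bytecode instead of falling through to Dispatch().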
1309   UpdateInterruptBudget(TruncateIntPtrToInt32(delta), backward);
1310   Node* new_bytecode_offset = Advance(delta, backward);
1311   Node* target_bytecode = LoadBytecode(new_bytecode_offset);
1312   return DispatchToBytecode(target_bytecode, new_bytecode_offset);
1313 }
1314 
1315 Node* InterpreterAssembler::Jump(Node* delta) { return Jump(delta, false); }
1316 
1317 Node* InterpreterAssembler::JumpBackward(Node* delta) {
1318   return Jump(delta, true);
1319 }
1320 
1321 void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
1322   Label match(this), no_match(this);
1323 
1324   Branch(condition, &match, &no_match);
1325   BIND(&match);
1326   Jump(delta);
1327   BIND(&no_match);
1328   Dispatch();
1329 }
1330 
1331 void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
1332   JumpConditional(WordEqual(lhs, rhs), delta);
1333 }
1334 
1335 void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
1336                                               Node* delta) {
1337   JumpConditional(WordNotEqual(lhs, rhs), delta);
1338 }
1339 
1340 Node* InterpreterAssembler::LoadBytecode(Node* bytecode_offset) {
1341   Node* bytecode =
1342       Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
1343   return ChangeUint32ToWord(bytecode);
1344 }
1345 
1346 Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
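       // If the bytecode that follows is Star, perform the register store inline
       // and load the bytecode after it, so the common "<bytecode>; Star" pattern
       // costs only a single dispatch.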
1347   Label do_inline_star(this), done(this);
1348 
1349   Variable var_bytecode(this, MachineType::PointerRepresentation());
1350   var_bytecode.Bind(target_bytecode);
1351 
1352   Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
1353   Node* is_star = WordEqual(target_bytecode, star_bytecode);
1354   Branch(is_star, &do_inline_star, &done);
1355 
1356   BIND(&do_inline_star);
1357   {
1358     InlineStar();
1359     var_bytecode.Bind(LoadBytecode(BytecodeOffset()));
1360     Goto(&done);
1361   }
1362   BIND(&done);
1363   return var_bytecode.value();
1364 }
1365 
1366 void InterpreterAssembler::InlineStar() {
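       // Temporarily switch this assembler to the Star bytecode so operand and
       // accumulator accessors (and tracing) behave as in a real Star handler,
       // then restore the previous bytecode state afterwards.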
1367   Bytecode previous_bytecode = bytecode_;
1368   AccumulatorUse previous_acc_use = accumulator_use_;
1369 
1370   bytecode_ = Bytecode::kStar;
1371   accumulator_use_ = AccumulatorUse::kNone;
1372 
1373 #ifdef V8_TRACE_IGNITION
1374   TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
1375 #endif
1376   StoreRegister(GetAccumulator(),
1377                 BytecodeOperandReg(0, LoadSensitivity::kSafe));
1378 
1379   DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
1380 
1381   Advance();
1382   bytecode_ = previous_bytecode;
1383   accumulator_use_ = previous_acc_use;
1384 }
1385 
1386 Node* InterpreterAssembler::Dispatch() {
1387   Comment("========= Dispatch");
1388   DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
1389   Node* target_offset = Advance();
1390   Node* target_bytecode = LoadBytecode(target_offset);
1391 
1392   if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
1393     target_bytecode = StarDispatchLookahead(target_bytecode);
1394   }
1395   return DispatchToBytecode(target_bytecode, BytecodeOffset());
1396 }
1397 
1398 Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
1399                                                Node* new_bytecode_offset) {
1400   if (FLAG_trace_ignition_dispatches) {
1401     TraceBytecodeDispatch(target_bytecode);
1402   }
1403 
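       // Each dispatch table entry is a raw code entry address, so the handler
       // for |target_bytecode| is loaded from the table at index target_bytecode.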
1404   Node* target_code_entry =
1405       Load(MachineType::Pointer(), DispatchTableRawPointer(),
1406            TimesPointerSize(target_bytecode));
1407 
1408   return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset,
1409                                         target_bytecode);
1410 }
1411 
1412 Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
1413                                                       Node* bytecode_offset,
1414                                                       Node* target_bytecode) {
1415   // TODO(ishell): Add CSA::CodeEntryPoint(code).
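       // The entry point of the handler is the start of the Code object's body:
       // its (untagged) address plus the header size.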
1416   Node* handler_entry =
1417       IntPtrAdd(BitcastTaggedToWord(handler),
1418                 IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
1419   return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset,
1420                                         target_bytecode);
1421 }
1422 
1423 Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
1424     Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) {
1425   // Propagate speculation poisoning.
1426   Node* poisoned_handler_entry = WordPoisonOnSpeculation(handler_entry);
1427   return TailCallBytecodeDispatch(
1428       InterpreterDispatchDescriptor{}, poisoned_handler_entry,
1429       GetAccumulatorUnchecked(), bytecode_offset, BytecodeArrayTaggedPointer(),
1430       DispatchTableRawPointer());
1431 }
1432 
1433 void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
1434   // Dispatching a wide bytecode requires treating the prefix
1435   // bytecode as a base pointer into the dispatch table and dispatching
1436   // the bytecode that follows relative to this base.
1437   //
1438   //   Indices 0-255 correspond to bytecodes with operand_scale == 0
1439   //   Indices 256-511 correspond to bytecodes with operand_scale == 1
1440   //   Indices 512-767 correspond to bytecodes with operand_scale == 2
1441   DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
1442   Node* next_bytecode_offset = Advance(1);
1443   Node* next_bytecode = LoadBytecode(next_bytecode_offset);
1444 
1445   if (FLAG_trace_ignition_dispatches) {
1446     TraceBytecodeDispatch(next_bytecode);
1447   }
1448 
1449   Node* base_index;
1450   switch (operand_scale) {
1451     case OperandScale::kDouble:
1452       base_index = IntPtrConstant(1 << kBitsPerByte);
1453       break;
1454     case OperandScale::kQuadruple:
1455       base_index = IntPtrConstant(2 << kBitsPerByte);
1456       break;
1457     default:
1458       UNREACHABLE();
1459   }
1460   Node* target_index = IntPtrAdd(base_index, next_bytecode);
1461   Node* target_code_entry =
1462       Load(MachineType::Pointer(), DispatchTableRawPointer(),
1463            TimesPointerSize(target_index));
1464 
1465   DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset,
1466                                  next_bytecode);
1467 }
1468 
1469 void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
1470   // TODO(rmcilroy): Investigate whether it is worth supporting self
1471   // optimization of primitive functions like FullCodegen.
1472 
1473   // Update the profiling count by the number of bytes between the end of the
1474   // current bytecode and the start of the first one, to simulate a backedge
1475   // to the start of the function.
1476   //
1477   // With headers and current offset, the bytecode array layout looks like:
1478   //
1479   //           <---------- simulated backedge ----------
1480   // | header | first bytecode | .... | return bytecode |
1481   //  |<------ current offset ------->
1482   //  ^ tagged bytecode array pointer
1483   //
1484   // UpdateInterruptBudget already handles adding the bytecode size to the
1485   // length of the back-edge, so we just have to correct for the non-zero offset
1486   // of the first bytecode.
1487 
1488   const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
1489   Node* profiling_weight = Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
1490                                     Int32Constant(kFirstBytecodeOffset));
1491   UpdateInterruptBudget(profiling_weight, true);
1492 }
1493 
1494 Node* InterpreterAssembler::LoadOSRNestingLevel() {
1495   return LoadObjectField(BytecodeArrayTaggedPointer(),
1496                          BytecodeArray::kOSRNestingLevelOffset,
1497                          MachineType::Int8());
1498 }
1499 
1500 void InterpreterAssembler::Abort(AbortReason abort_reason) {
1501   disable_stack_check_across_call_ = true;
1502   Node* abort_id = SmiConstant(abort_reason);
1503   CallRuntime(Runtime::kAbort, GetContext(), abort_id);
1504   disable_stack_check_across_call_ = false;
1505 }
1506 
1507 void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
1508                                                AbortReason abort_reason) {
1509   Label ok(this), abort(this, Label::kDeferred);
1510   Branch(WordEqual(lhs, rhs), &ok, &abort);
1511 
1512   BIND(&abort);
1513   Abort(abort_reason);
1514   Goto(&ok);
1515 
1516   BIND(&ok);
1517 }
1518 
1519 void InterpreterAssembler::MaybeDropFrames(Node* context) {
1520   Node* restart_fp_address =
1521       ExternalConstant(ExternalReference::debug_restart_fp_address(isolate()));
1522 
1523   Node* restart_fp = Load(MachineType::Pointer(), restart_fp_address);
1524   Node* null = IntPtrConstant(0);
1525 
1526   Label ok(this), drop_frames(this);
1527   Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames);
1528 
1529   BIND(&drop_frames);
1530   // We don't expect this call to return since the frame dropper tears down
1531   // the stack and jumps into the function on the target frame to restart it.
1532   CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
1533   Abort(AbortReason::kUnexpectedReturnFromFrameDropper);
1534   Goto(&ok);
1535 
1536   BIND(&ok);
1537 }
1538 
1539 void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
1540   CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
1541               SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
1542 }
1543 
1544 void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
1545   Node* counters_table = ExternalConstant(
1546       ExternalReference::interpreter_dispatch_counters(isolate()));
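       // The dispatch counters form a flattened (kLast + 1) x (kLast + 1) matrix
       // indexed by [source bytecode][target bytecode]; each counter saturates at
       // the maximum uintptr_t value instead of wrapping.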
1547   Node* source_bytecode_table_index = IntPtrConstant(
1548       static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));
1549 
1550   Node* counter_offset =
1551       TimesPointerSize(IntPtrAdd(source_bytecode_table_index, target_bytecode));
1552   Node* old_counter =
1553       Load(MachineType::IntPtr(), counters_table, counter_offset);
1554 
1555   Label counter_ok(this), counter_saturated(this, Label::kDeferred);
1556 
1557   Node* counter_reached_max = WordEqual(
1558       old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
1559   Branch(counter_reached_max, &counter_saturated, &counter_ok);
1560 
1561   BIND(&counter_ok);
1562   {
1563     Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
1564     StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
1565                         counter_offset, new_counter);
1566     Goto(&counter_saturated);
1567   }
1568 
1569   BIND(&counter_saturated);
1570 }
1571 
1572 // static
1573 bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
1574 #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
1575   return false;
1576 #elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
1577     V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
1578   return true;
1579 #else
1580 #error "Unknown Architecture"
1581 #endif
1582 }
1583 
1584 void InterpreterAssembler::AbortIfRegisterCountInvalid(
1585     Node* parameters_and_registers, Node* formal_parameter_count,
1586     Node* register_count) {
1587   Node* array_size = LoadAndUntagFixedArrayBaseLength(parameters_and_registers);
1588 
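       // The array must be able to hold all formal parameters followed by all
       // interpreter registers.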
1589   Label ok(this), abort(this, Label::kDeferred);
1590   Branch(UintPtrLessThanOrEqual(
1591              IntPtrAdd(formal_parameter_count, register_count), array_size),
1592          &ok, &abort);
1593 
1594   BIND(&abort);
1595   Abort(AbortReason::kInvalidParametersAndRegistersInGenerator);
1596   Goto(&ok);
1597 
1598   BIND(&ok);
1599 }
1600 
1601 Node* InterpreterAssembler::ExportParametersAndRegisterFile(
1602     TNode<FixedArray> array, const RegListNodePair& registers,
1603     TNode<Int32T> formal_parameter_count) {
1604   // Store the formal parameters (without receiver) followed by the
1605   // registers into the generator's internal parameters_and_registers field.
1606   TNode<IntPtrT> formal_parameter_count_intptr =
1607       ChangeInt32ToIntPtr(formal_parameter_count);
1608   Node* register_count = ChangeUint32ToWord(registers.reg_count());
1609   if (FLAG_debug_code) {
1610     CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
1611                                  RegisterLocation(Register(0))));
1612     AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
1613                                 register_count);
1614   }
1615 
1616   {
1617     Variable var_index(this, MachineType::PointerRepresentation());
1618     var_index.Bind(IntPtrConstant(0));
1619 
1620     // Iterate over parameters and write them into the array.
1621     Label loop(this, &var_index), done_loop(this);
1622 
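         // reg_base is chosen so that formal parameter |i| (the receiver is not
         // exported) is read from register operand reg_base - i in the loop below.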
1623     Node* reg_base = IntPtrAdd(
1624         IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() - 1),
1625         formal_parameter_count_intptr);
1626 
1627     Goto(&loop);
1628     BIND(&loop);
1629     {
1630       Node* index = var_index.value();
1631       GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
1632                 &done_loop);
1633 
1634       Node* reg_index = IntPtrSub(reg_base, index);
1635       Node* value = LoadRegister(reg_index);
1636 
1637       StoreFixedArrayElement(array, index, value);
1638 
1639       var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
1640       Goto(&loop);
1641     }
1642     BIND(&done_loop);
1643   }
1644 
1645   {
1646     // Iterate over register file and write values into array.
1647     // The mapping of register to array index must match that used in
1648     // BytecodeGraphBuilder::VisitResumeGenerator.
1649     Variable var_index(this, MachineType::PointerRepresentation());
1650     var_index.Bind(IntPtrConstant(0));
1651 
1652     Label loop(this, &var_index), done_loop(this);
1653     Goto(&loop);
1654     BIND(&loop);
1655     {
1656       Node* index = var_index.value();
1657       GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
1658 
1659       Node* reg_index =
1660           IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
1661       Node* value = LoadRegister(reg_index);
1662 
1663       Node* array_index = IntPtrAdd(formal_parameter_count_intptr, index);
1664       StoreFixedArrayElement(array, array_index, value);
1665 
1666       var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
1667       Goto(&loop);
1668     }
1669     BIND(&done_loop);
1670   }
1671 
1672   return array;
1673 }
1674 
1675 Node* InterpreterAssembler::ImportRegisterFile(
1676     TNode<FixedArray> array, const RegListNodePair& registers,
1677     TNode<Int32T> formal_parameter_count) {
1678   TNode<IntPtrT> formal_parameter_count_intptr =
1679       ChangeInt32ToIntPtr(formal_parameter_count);
1680   TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
1681   if (FLAG_debug_code) {
1682     CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
1683                                  RegisterLocation(Register(0))));
1684     AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
1685                                 register_count);
1686   }
1687 
1688   TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
1689 
1690   // Iterate over the array and write its values into the register file. Also
1691   // erase the array contents so as not to keep them alive artificially.
1692   Label loop(this, &var_index), done_loop(this);
1693   Goto(&loop);
1694   BIND(&loop);
1695   {
1696     TNode<IntPtrT> index = var_index.value();
1697     GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
1698 
1699     TNode<IntPtrT> array_index =
1700         IntPtrAdd(formal_parameter_count_intptr, index);
1701     TNode<Object> value = LoadFixedArrayElement(array, array_index);
1702 
1703     TNode<IntPtrT> reg_index =
1704         IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
1705     StoreRegister(value, reg_index);
1706 
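         // Overwrite the imported slot with the stale-register sentinel so the
         // generator object no longer keeps the value alive.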
1707     StoreFixedArrayElement(array, array_index,
1708                            LoadRoot(Heap::kStaleRegisterRootIndex));
1709 
1710     var_index = IntPtrAdd(index, IntPtrConstant(1));
1711     Goto(&loop);
1712   }
1713   BIND(&done_loop);
1714 
1715   return array;
1716 }
1717 
1718 int InterpreterAssembler::CurrentBytecodeSize() const {
1719   return Bytecodes::Size(bytecode_, operand_scale_);
1720 }
1721 
1722 void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
1723   Node* object = GetAccumulator();
1724   Node* context = GetContext();
1725 
1726   Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
1727   Variable var_result(this, MachineRepresentation::kTagged);
1728   Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this),
1729       if_objectisother(this, Label::kDeferred);
1730 
1731   GotoIf(TaggedIsSmi(object), &if_objectissmi);
1732   Branch(IsHeapNumber(object), &if_objectisheapnumber, &if_objectisother);
1733 
1734   BIND(&if_objectissmi);
1735   {
1736     var_result.Bind(object);
1737     var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
1738     Goto(&if_done);
1739   }
1740 
1741   BIND(&if_objectisheapnumber);
1742   {
1743     var_result.Bind(object);
1744     var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
1745     Goto(&if_done);
1746   }
1747 
1748   BIND(&if_objectisother);
1749   {
1750     auto builtin = Builtins::kNonNumberToNumber;
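         // ToNumeric must leave BigInts unchanged, so they are handled here with
         // BigInt feedback; any other non-number is converted by the builtin call
         // below.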
1751     if (mode == Object::Conversion::kToNumeric) {
1752       builtin = Builtins::kNonNumberToNumeric;
1753       // Special case for collecting BigInt feedback.
1754       Label not_bigint(this);
1755       GotoIfNot(IsBigInt(object), &not_bigint);
1756       {
1757         var_result.Bind(object);
1758         var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
1759         Goto(&if_done);
1760       }
1761       BIND(&not_bigint);
1762     }
1763 
1764     // Convert {object} by calling out to the appropriate builtin.
1765     var_result.Bind(CallBuiltin(builtin, context, object));
1766     var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
1767     Goto(&if_done);
1768   }
1769 
1770   BIND(&if_done);
1771 
1772   // Record the type feedback collected for {object}.
1773   Node* slot_index = BytecodeOperandIdx(0);
1774   Node* feedback_vector = LoadFeedbackVector();
1775   UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
1776 
1777   SetAccumulator(var_result.value());
1778   Dispatch();
1779 }
1780 
1781 void InterpreterAssembler::DeserializeLazyAndDispatch() {
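       // Ask the runtime to deserialize the handler for the current bytecode at
       // this operand scale, then dispatch to it without advancing the bytecode
       // offset.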
1782   Node* context = GetContext();
1783   Node* bytecode_offset = BytecodeOffset();
1784   Node* bytecode = LoadBytecode(bytecode_offset);
1785 
1786   Node* target_handler =
1787       CallRuntime(Runtime::kInterpreterDeserializeLazy, context,
1788                   SmiTag(bytecode), SmiConstant(operand_scale()));
1789   DispatchToBytecodeHandler(target_handler, bytecode_offset, bytecode);
1790 }
1791 
1792 }  // namespace interpreter
1793 }  // namespace internal
1794 }  // namespace v8
1795