1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/interpreter/interpreter-assembler.h"
6 
7 #include <limits>
8 #include <ostream>
9 
10 #include "src/code-factory.h"
11 #include "src/frames.h"
12 #include "src/interface-descriptors.h"
13 #include "src/interpreter/bytecodes.h"
14 #include "src/interpreter/interpreter.h"
15 #include "src/machine-type.h"
16 #include "src/macro-assembler.h"
17 #include "src/objects-inl.h"
18 #include "src/zone/zone.h"
19 
20 namespace v8 {
21 namespace internal {
22 namespace interpreter {
23 
24 using compiler::CodeAssemblerState;
25 using compiler::Node;
26 
27 InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
28                                            Bytecode bytecode,
29                                            OperandScale operand_scale)
30     : CodeStubAssembler(state),
31       bytecode_(bytecode),
32       operand_scale_(operand_scale),
33       bytecode_offset_(this, MachineType::PointerRepresentation()),
34       interpreted_frame_pointer_(this, MachineType::PointerRepresentation()),
35       bytecode_array_(this, MachineRepresentation::kTagged),
36       dispatch_table_(this, MachineType::PointerRepresentation()),
37       accumulator_(this, MachineRepresentation::kTagged),
38       accumulator_use_(AccumulatorUse::kNone),
39       made_call_(false),
40       reloaded_frame_ptr_(false),
41       saved_bytecode_offset_(false),
42       disable_stack_check_across_call_(false),
43       stack_pointer_before_call_(nullptr) {
44   accumulator_.Bind(Parameter(InterpreterDispatchDescriptor::kAccumulator));
45   bytecode_offset_.Bind(
46       Parameter(InterpreterDispatchDescriptor::kBytecodeOffset));
47   bytecode_array_.Bind(
48       Parameter(InterpreterDispatchDescriptor::kBytecodeArray));
49   dispatch_table_.Bind(
50       Parameter(InterpreterDispatchDescriptor::kDispatchTable));
51 
52   if (FLAG_trace_ignition) {
53     TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
54   }
55   RegisterCallGenerationCallbacks([this] { CallPrologue(); },
56                                   [this] { CallEpilogue(); });
57 }
58 
59 InterpreterAssembler::~InterpreterAssembler() {
60   // If the following check fails the handler does not use the
61   // accumulator in the way described in the bytecode definitions in
62   // bytecodes.h.
63   DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
64   UnregisterCallGenerationCallbacks();
65 }
66 
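// Returns the interpreted frame pointer, binding it lazily on first use and
// reloading it once after any call made along the bytecode's critical path.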
67 Node* InterpreterAssembler::GetInterpretedFramePointer() {
68   if (!interpreted_frame_pointer_.IsBound()) {
69     interpreted_frame_pointer_.Bind(LoadParentFramePointer());
70   } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
71              !reloaded_frame_ptr_) {
72     interpreted_frame_pointer_.Bind(LoadParentFramePointer());
73     reloaded_frame_ptr_ = true;
74   }
75   return interpreted_frame_pointer_.value();
76 }
77 
78 Node* InterpreterAssembler::GetAccumulatorUnchecked() {
79   return accumulator_.value();
80 }
81 
82 Node* InterpreterAssembler::GetAccumulator() {
83   DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
84   accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
85   return GetAccumulatorUnchecked();
86 }
87 
88 void InterpreterAssembler::SetAccumulator(Node* value) {
89   DCHECK(Bytecodes::WritesAccumulator(bytecode_));
90   accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
91   accumulator_.Bind(value);
92 }
93 
94 Node* InterpreterAssembler::GetContext() {
95   return LoadRegister(Register::current_context());
96 }
97 
98 void InterpreterAssembler::SetContext(Node* value) {
99   StoreRegister(value, Register::current_context());
100 }
101 
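// Walks |depth| hops up the context chain via Context::PREVIOUS_INDEX and
// returns the context found there.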
102 Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
103   Variable cur_context(this, MachineRepresentation::kTaggedPointer);
104   cur_context.Bind(context);
105 
106   Variable cur_depth(this, MachineRepresentation::kWord32);
107   cur_depth.Bind(depth);
108 
109   Label context_found(this);
110 
111   Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
112   Label context_search(this, 2, context_search_loop_variables);
113 
114   // Fast path if the depth is 0.
115   Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);
116 
117   // Loop until the depth is 0.
118   Bind(&context_search);
119   {
120     cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
121     cur_context.Bind(
122         LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
123 
124     Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
125            &context_search);
126   }
127 
128   Bind(&context_found);
129   return cur_context.value();
130 }
131 
132 void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
133                                                               Node* depth,
134                                                               Label* target) {
135   Variable cur_context(this, MachineRepresentation::kTaggedPointer);
136   cur_context.Bind(context);
137 
138   Variable cur_depth(this, MachineRepresentation::kWord32);
139   cur_depth.Bind(depth);
140 
141   Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
142   Label context_search(this, 2, context_search_loop_variables);
143 
144   // Loop until the depth is 0.
145   Goto(&context_search);
146   Bind(&context_search);
147   {
148     // TODO(leszeks): We only need to do this check if the context had a sloppy
149     // eval; we could pass in a context chain bitmask to figure out which
150     // contexts actually need to be checked.
151 
152     Node* extension_slot =
153         LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
154 
155     // Jump to the target if the extension slot is not a hole.
156     GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);
157 
158     cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
159     cur_context.Bind(
160         LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
161 
162     GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
163            &context_search);
164   }
165 }
166 
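// Returns the current bytecode offset, restoring it from the spilled
// Register::bytecode_offset() after a call made along the critical path.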
167 Node* InterpreterAssembler::BytecodeOffset() {
168   if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
169       (bytecode_offset_.value() ==
170        Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
171     bytecode_offset_.Bind(LoadAndUntagRegister(Register::bytecode_offset()));
172   }
173   return bytecode_offset_.value();
174 }
175 
176 Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
177   // Force a re-load of the bytecode array after every call in case the debugger
178   // has been activated.
179   if (made_call_ &&
180       (bytecode_array_.value() ==
181        Parameter(InterpreterDispatchDescriptor::kBytecodeArray))) {
182     bytecode_array_.Bind(LoadRegister(Register::bytecode_array()));
183   }
184   return bytecode_array_.value();
185 }
186 
187 Node* InterpreterAssembler::DispatchTableRawPointer() {
188   if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
189       (dispatch_table_.value() ==
190        Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
191     dispatch_table_.Bind(ExternalConstant(
192         ExternalReference::interpreter_dispatch_table_address(isolate())));
193   }
194   return dispatch_table_.value();
195 }
196 
197 Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
198   return IntPtrAdd(GetInterpretedFramePointer(),
199                    RegisterFrameOffset(reg_index));
200 }
201 
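// Converts a register index into a byte offset from the interpreted frame
// pointer by scaling it with the pointer size.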
202 Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
203   return WordShl(index, kPointerSizeLog2);
204 }
205 
206 Node* InterpreterAssembler::LoadRegister(Register reg) {
207   return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
208               IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
209 }
210 
211 Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
212   return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
213               RegisterFrameOffset(reg_index));
214 }
215 
216 Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
217   return LoadAndUntagSmi(GetInterpretedFramePointer(), reg.ToOperand()
218                                                            << kPointerSizeLog2);
219 }
220 
221 Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
222   return StoreNoWriteBarrier(
223       MachineRepresentation::kTagged, GetInterpretedFramePointer(),
224       IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
225 }
226 
227 Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
228   return StoreNoWriteBarrier(MachineRepresentation::kTagged,
229                              GetInterpretedFramePointer(),
230                              RegisterFrameOffset(reg_index), value);
231 }
232 
233 Node* InterpreterAssembler::StoreAndTagRegister(compiler::Node* value,
234                                                 Register reg) {
235   int offset = reg.ToOperand() << kPointerSizeLog2;
236   return StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
237 }
238 
239 Node* InterpreterAssembler::NextRegister(Node* reg_index) {
240   // Register indexes are negative, so the next index is minus one.
241   return IntPtrAdd(reg_index, IntPtrConstant(-1));
242 }
243 
244 Node* InterpreterAssembler::OperandOffset(int operand_index) {
245   return IntPtrConstant(
246       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
247 }
248 
249 Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) {
250   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
251   DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
252                                     bytecode_, operand_index, operand_scale()));
253   Node* operand_offset = OperandOffset(operand_index);
254   return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
255               IntPtrAdd(BytecodeOffset(), operand_offset));
256 }
257 
258 Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) {
259   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
260   DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
261                                     bytecode_, operand_index, operand_scale()));
262   Node* operand_offset = OperandOffset(operand_index);
263   return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
264               IntPtrAdd(BytecodeOffset(), operand_offset));
265 }
266 
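// Assembles a 16- or 32-bit operand out of individual byte loads on targets
// that do not support unaligned memory accesses.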
267 compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
268     int relative_offset, MachineType result_type) {
269   static const int kMaxCount = 4;
270   DCHECK(!TargetSupportsUnalignedAccess());
271 
272   int count;
273   switch (result_type.representation()) {
274     case MachineRepresentation::kWord16:
275       count = 2;
276       break;
277     case MachineRepresentation::kWord32:
278       count = 4;
279       break;
280     default:
281       UNREACHABLE();
282       break;
283   }
284   MachineType msb_type =
285       result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();
286 
287 #if V8_TARGET_LITTLE_ENDIAN
288   const int kStep = -1;
289   int msb_offset = count - 1;
290 #elif V8_TARGET_BIG_ENDIAN
291   const int kStep = 1;
292   int msb_offset = 0;
293 #else
294 #error "Unknown Architecture"
295 #endif
296 
297   // Read the most significant byte into bytes[0] and then in order
298   // down to least significant in bytes[count - 1].
299   DCHECK(count <= kMaxCount);
300   compiler::Node* bytes[kMaxCount];
301   for (int i = 0; i < count; i++) {
302     MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
303     Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
304     Node* array_offset = IntPtrAdd(BytecodeOffset(), offset);
305     bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset);
306   }
307 
308   // Pack LSB to MSB.
309   Node* result = bytes[--count];
310   for (int i = 1; --count >= 0; i++) {
311     Node* shift = Int32Constant(i * kBitsPerByte);
312     Node* value = Word32Shl(bytes[count], shift);
313     result = Word32Or(value, result);
314   }
315   return result;
316 }
317 
318 Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) {
319   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
320   DCHECK_EQ(
321       OperandSize::kShort,
322       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
323   int operand_offset =
324       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
325   if (TargetSupportsUnalignedAccess()) {
326     return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
327                 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
328   } else {
329     return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16());
330   }
331 }
332 
333 Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) {
334   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
335   DCHECK_EQ(
336       OperandSize::kShort,
337       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
338   int operand_offset =
339       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
340   if (TargetSupportsUnalignedAccess()) {
341     return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
342                 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
343   } else {
344     return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
345   }
346 }
347 
348 Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
349   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
350   DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
351                                     bytecode_, operand_index, operand_scale()));
352   int operand_offset =
353       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
354   if (TargetSupportsUnalignedAccess()) {
355     return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
356                 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
357   } else {
358     return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32());
359   }
360 }
361 
362 Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) {
363   DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
364   DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
365                                     bytecode_, operand_index, operand_scale()));
366   int operand_offset =
367       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
368   if (TargetSupportsUnalignedAccess()) {
369     return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
370                 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
371   } else {
372     return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
373   }
374 }
375 
376 Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
377                                                   OperandSize operand_size) {
378   DCHECK(!Bytecodes::IsUnsignedOperandType(
379       Bytecodes::GetOperandType(bytecode_, operand_index)));
380   switch (operand_size) {
381     case OperandSize::kByte:
382       return BytecodeOperandSignedByte(operand_index);
383     case OperandSize::kShort:
384       return BytecodeOperandSignedShort(operand_index);
385     case OperandSize::kQuad:
386       return BytecodeOperandSignedQuad(operand_index);
387     case OperandSize::kNone:
388       UNREACHABLE();
389   }
390   return nullptr;
391 }
392 
393 Node* InterpreterAssembler::BytecodeUnsignedOperand(int operand_index,
394                                                     OperandSize operand_size) {
395   DCHECK(Bytecodes::IsUnsignedOperandType(
396       Bytecodes::GetOperandType(bytecode_, operand_index)));
397   switch (operand_size) {
398     case OperandSize::kByte:
399       return BytecodeOperandUnsignedByte(operand_index);
400     case OperandSize::kShort:
401       return BytecodeOperandUnsignedShort(operand_index);
402     case OperandSize::kQuad:
403       return BytecodeOperandUnsignedQuad(operand_index);
404     case OperandSize::kNone:
405       UNREACHABLE();
406   }
407   return nullptr;
408 }
409 
410 Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
411   DCHECK_EQ(OperandType::kRegCount,
412             Bytecodes::GetOperandType(bytecode_, operand_index));
413   OperandSize operand_size =
414       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
415   return BytecodeUnsignedOperand(operand_index, operand_size);
416 }
417 
418 Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
419   DCHECK_EQ(OperandType::kFlag8,
420             Bytecodes::GetOperandType(bytecode_, operand_index));
421   OperandSize operand_size =
422       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
423   DCHECK_EQ(operand_size, OperandSize::kByte);
424   return BytecodeUnsignedOperand(operand_index, operand_size);
425 }
426 
427 Node* InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
428   DCHECK_EQ(OperandType::kUImm,
429             Bytecodes::GetOperandType(bytecode_, operand_index));
430   OperandSize operand_size =
431       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
432   return BytecodeUnsignedOperand(operand_index, operand_size);
433 }
434 
435 Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) {
436   return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
437 }
438 
439 Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
440   DCHECK_EQ(OperandType::kImm,
441             Bytecodes::GetOperandType(bytecode_, operand_index));
442   OperandSize operand_size =
443       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
444   return BytecodeSignedOperand(operand_index, operand_size);
445 }
446 
447 Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) {
448   return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
449 }
450 
451 Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
452   return SmiFromWord32(BytecodeOperandImm(operand_index));
453 }
454 
455 Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
456   DCHECK(OperandType::kIdx ==
457          Bytecodes::GetOperandType(bytecode_, operand_index));
458   OperandSize operand_size =
459       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
460   return ChangeUint32ToWord(
461       BytecodeUnsignedOperand(operand_index, operand_size));
462 }
463 
464 Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
465   return SmiTag(BytecodeOperandIdx(operand_index));
466 }
467 
468 Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
469   DCHECK(Bytecodes::IsRegisterOperandType(
470       Bytecodes::GetOperandType(bytecode_, operand_index)));
471   OperandSize operand_size =
472       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
473   return ChangeInt32ToIntPtr(
474       BytecodeSignedOperand(operand_index, operand_size));
475 }
476 
477 Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
478   DCHECK(OperandType::kRuntimeId ==
479          Bytecodes::GetOperandType(bytecode_, operand_index));
480   OperandSize operand_size =
481       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
482   DCHECK_EQ(operand_size, OperandSize::kShort);
483   return BytecodeUnsignedOperand(operand_index, operand_size);
484 }
485 
486 Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
487   DCHECK(OperandType::kIntrinsicId ==
488          Bytecodes::GetOperandType(bytecode_, operand_index));
489   OperandSize operand_size =
490       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
491   DCHECK_EQ(operand_size, OperandSize::kByte);
492   return BytecodeUnsignedOperand(operand_index, operand_size);
493 }
494 
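// Loads the constant pool entry at |index|; the constant pool is a FixedArray
// referenced from the BytecodeArray.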
495 Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
496   Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
497                                         BytecodeArray::kConstantPoolOffset);
498   return LoadFixedArrayElement(constant_pool, index);
499 }
500 
501 Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
502   return SmiUntag(LoadConstantPoolEntry(index));
503 }
504 
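// Loads the feedback vector through the closure's feedback vector cell.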
505 Node* InterpreterAssembler::LoadFeedbackVector() {
506   Node* function = LoadRegister(Register::function_closure());
507   Node* cell = LoadObjectField(function, JSFunction::kFeedbackVectorOffset);
508   Node* vector = LoadObjectField(cell, Cell::kValueOffset);
509   return vector;
510 }
511 
512 void InterpreterAssembler::SaveBytecodeOffset() {
513   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
514   StoreAndTagRegister(BytecodeOffset(), Register::bytecode_offset());
515   saved_bytecode_offset_ = true;
516 }
517 
518 void InterpreterAssembler::CallPrologue() {
519   if (!saved_bytecode_offset_) {
520     // If there are multiple calls in the bytecode handler, you need to spill
521     // before each of them, unless SaveBytecodeOffset has explicitly been called
522     // in a path that dominates _all_ of those calls. Therefore don't set
523     // saved_bytecode_offset_ to true or call SaveBytecodeOffset.
524     StoreAndTagRegister(BytecodeOffset(), Register::bytecode_offset());
525   }
526 
527   if (FLAG_debug_code && !disable_stack_check_across_call_) {
528     DCHECK(stack_pointer_before_call_ == nullptr);
529     stack_pointer_before_call_ = LoadStackPointer();
530   }
531   made_call_ = true;
532 }
533 
534 void InterpreterAssembler::CallEpilogue() {
535   if (FLAG_debug_code && !disable_stack_check_across_call_) {
536     Node* stack_pointer_after_call = LoadStackPointer();
537     Node* stack_pointer_before_call = stack_pointer_before_call_;
538     stack_pointer_before_call_ = nullptr;
539     AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
540                         kUnexpectedStackPointer);
541   }
542 }
543 
544 Node* InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
545                                                Node* slot_id) {
546   Comment("increment call count");
547   Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
548   Node* call_count = LoadFixedArrayElement(feedback_vector, call_count_slot);
549   Node* new_count = SmiAdd(call_count, SmiConstant(1));
550   // Count is Smi, so we don't need a write barrier.
551   return StoreFixedArrayElement(feedback_vector, call_count_slot, new_count,
552                                 SKIP_WRITE_BARRIER);
553 }
554 
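// Performs a JS call while collecting type feedback in |feedback_vector| at
// |slot_id|: a weak cell for a monomorphic target, an AllocationSite for the
// Array function, or the megamorphic sentinel otherwise.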
555 Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
556                                                Node* first_arg, Node* arg_count,
557                                                Node* slot_id,
558                                                Node* feedback_vector,
559                                                TailCallMode tail_call_mode) {
560   // Static checks to assert it is safe to examine the type feedback element.
561   // We don't know that we have a weak cell. We might have a private symbol
562   // or an AllocationSite, but the memory is safe to examine.
563   // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
564   // FixedArray.
565   // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
566   // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
567   // computed, meaning that it can't appear to be a pointer. If the low bit is
568   // 0, then hash is computed, but the 0 bit prevents the field from appearing
569   // to be a pointer.
570   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
571   DCHECK(Bytecodes::IsCallOrConstruct(bytecode_));
572   STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
573   STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
574                     WeakCell::kValueOffset &&
575                 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
576 
577   Variable return_value(this, MachineRepresentation::kTagged);
578   Label call_function(this), extra_checks(this, Label::kDeferred), call(this),
579       end(this);
580 
581   // The checks. First, does function match the recorded monomorphic target?
582   Node* feedback_element = LoadFixedArrayElement(feedback_vector, slot_id);
583   Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
584   Node* is_monomorphic = WordEqual(function, feedback_value);
585   GotoIfNot(is_monomorphic, &extra_checks);
586 
587   // The compare above could have been a SMI/SMI comparison. Guard against
588   // this convincing us that we have a monomorphic JSFunction.
589   Node* is_smi = TaggedIsSmi(function);
590   Branch(is_smi, &extra_checks, &call_function);
591 
592   Bind(&call_function);
593   {
594     // Increment the call count.
595     IncrementCallCount(feedback_vector, slot_id);
596 
597     // Call using call function builtin.
598     Callable callable = CodeFactory::InterpreterPushArgsAndCall(
599         isolate(), tail_call_mode, InterpreterPushArgsMode::kJSFunction);
600     Node* code_target = HeapConstant(callable.code());
601     Node* ret_value = CallStub(callable.descriptor(), code_target, context,
602                                arg_count, first_arg, function);
603     return_value.Bind(ret_value);
604     Goto(&end);
605   }
606 
607   Bind(&extra_checks);
608   {
609     Label check_initialized(this), mark_megamorphic(this),
610         create_allocation_site(this);
611 
612     Comment("check if megamorphic");
613     // Check if it is a megamorphic target.
614     Node* is_megamorphic =
615         WordEqual(feedback_element,
616                   HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
617     GotoIf(is_megamorphic, &call);
618 
619     Comment("check if it is an allocation site");
620     GotoIfNot(IsAllocationSiteMap(LoadMap(feedback_element)),
621               &check_initialized);
622 
623     // If it is not the Array() function, mark megamorphic.
624     Node* context_slot = LoadContextElement(LoadNativeContext(context),
625                                             Context::ARRAY_FUNCTION_INDEX);
626     Node* is_array_function = WordEqual(context_slot, function);
627     GotoIfNot(is_array_function, &mark_megamorphic);
628 
629     // It is a monomorphic Array function. Increment the call count.
630     IncrementCallCount(feedback_vector, slot_id);
631 
632     // Call ArrayConstructorStub.
633     Callable callable_call =
634         CodeFactory::InterpreterPushArgsAndConstructArray(isolate());
635     Node* code_target_call = HeapConstant(callable_call.code());
636     Node* ret_value =
637         CallStub(callable_call.descriptor(), code_target_call, context,
638                  arg_count, function, feedback_element, first_arg);
639     return_value.Bind(ret_value);
640     Goto(&end);
641 
642     Bind(&check_initialized);
643     {
644       Comment("check if uninitialized");
645       // Check if it is an uninitialized target first.
646       Node* is_uninitialized = WordEqual(
647           feedback_element,
648           HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
649       GotoIfNot(is_uninitialized, &mark_megamorphic);
650 
651       Comment("handle_unitinitialized");
652       // If it is not a JSFunction mark it as megamorphic.
653       Node* is_smi = TaggedIsSmi(function);
654       GotoIf(is_smi, &mark_megamorphic);
655 
656       // Check if function is an object of JSFunction type.
657       Node* instance_type = LoadInstanceType(function);
658       Node* is_js_function =
659           Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
660       GotoIfNot(is_js_function, &mark_megamorphic);
661 
662       // Check if it is the Array() function.
663       Node* context_slot = LoadContextElement(LoadNativeContext(context),
664                                               Context::ARRAY_FUNCTION_INDEX);
665       Node* is_array_function = WordEqual(context_slot, function);
666       GotoIf(is_array_function, &create_allocation_site);
667 
668       // Check if the function belongs to the same native context.
669       Node* native_context = LoadNativeContext(
670           LoadObjectField(function, JSFunction::kContextOffset));
671       Node* is_same_native_context =
672           WordEqual(native_context, LoadNativeContext(context));
673       GotoIfNot(is_same_native_context, &mark_megamorphic);
674 
675       CreateWeakCellInFeedbackVector(feedback_vector, SmiTag(slot_id),
676                                      function);
677 
678       // Call using call function builtin.
679       Goto(&call_function);
680     }
681 
682     Bind(&create_allocation_site);
683     {
684       CreateAllocationSiteInFeedbackVector(feedback_vector, SmiTag(slot_id));
685 
686       // Call using CallFunction builtin. CallICs have a PREMONOMORPHIC state.
687       // They start collecting feedback only when a call is executed the second
688       // time. So, do not pass any feedback here.
689       Goto(&call_function);
690     }
691 
692     Bind(&mark_megamorphic);
693     {
694       // Mark it as megamorphic.
695       // MegamorphicSentinel is created as a part of Heap::InitialObjects
696       // and will not move during a GC. So it is safe to skip write barrier.
697       DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
698       StoreFixedArrayElement(
699           feedback_vector, slot_id,
700           HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
701           SKIP_WRITE_BARRIER);
702       Goto(&call);
703     }
704   }
705 
706   Bind(&call);
707   {
708     Comment("Increment call count and call using Call builtin");
709     // Increment the call count.
710     IncrementCallCount(feedback_vector, slot_id);
711 
712     // Call using call builtin.
713     Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
714         isolate(), tail_call_mode, InterpreterPushArgsMode::kOther);
715     Node* code_target_call = HeapConstant(callable_call.code());
716     Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
717                                context, arg_count, first_arg, function);
718     return_value.Bind(ret_value);
719     Goto(&end);
720   }
721 
722   Bind(&end);
723   return return_value.value();
724 }
725 
726 Node* InterpreterAssembler::CallJS(Node* function, Node* context,
727                                    Node* first_arg, Node* arg_count,
728                                    TailCallMode tail_call_mode) {
729   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
730   DCHECK(Bytecodes::IsCallOrConstruct(bytecode_));
731   Callable callable = CodeFactory::InterpreterPushArgsAndCall(
732       isolate(), tail_call_mode, InterpreterPushArgsMode::kOther);
733   Node* code_target = HeapConstant(callable.code());
734 
735   return CallStub(callable.descriptor(), code_target, context, arg_count,
736                   first_arg, function);
737 }
738 
739 Node* InterpreterAssembler::CallJSWithSpread(Node* function, Node* context,
740                                              Node* first_arg, Node* arg_count) {
741   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
742   Callable callable = CodeFactory::InterpreterPushArgsAndCall(
743       isolate(), TailCallMode::kDisallow,
744       InterpreterPushArgsMode::kWithFinalSpread);
745   Node* code_target = HeapConstant(callable.code());
746 
747   return CallStub(callable.descriptor(), code_target, context, arg_count,
748                   first_arg, function);
749 }
750 
751 Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
752                                       Node* new_target, Node* first_arg,
753                                       Node* arg_count, Node* slot_id,
754                                       Node* feedback_vector) {
755   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
756   Variable return_value(this, MachineRepresentation::kTagged);
757   Variable allocation_feedback(this, MachineRepresentation::kTagged);
758   Label call_construct_function(this, &allocation_feedback),
759       extra_checks(this, Label::kDeferred), call_construct(this), end(this);
760 
761   // Slot id of 0 is used to indicate no type feedback is available.
762   STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
763   Node* is_feedback_unavailable = WordEqual(slot_id, IntPtrConstant(0));
764   GotoIf(is_feedback_unavailable, &call_construct);
765 
766   // Check that the constructor is not a smi.
767   Node* is_smi = TaggedIsSmi(constructor);
768   GotoIf(is_smi, &call_construct);
769 
770   // Check that constructor is a JSFunction.
771   Node* instance_type = LoadInstanceType(constructor);
772   Node* is_js_function =
773       Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
774   GotoIfNot(is_js_function, &call_construct);
775 
776   // Check if it is a monomorphic constructor.
777   Node* feedback_element = LoadFixedArrayElement(feedback_vector, slot_id);
778   Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
779   Node* is_monomorphic = WordEqual(constructor, feedback_value);
780   allocation_feedback.Bind(UndefinedConstant());
781   Branch(is_monomorphic, &call_construct_function, &extra_checks);
782 
783   Bind(&call_construct_function);
784   {
785     Comment("call using ConstructFunction");
786     IncrementCallCount(feedback_vector, slot_id);
787     Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct(
788         isolate(), InterpreterPushArgsMode::kJSFunction);
789     return_value.Bind(CallStub(callable_function.descriptor(),
790                                HeapConstant(callable_function.code()), context,
791                                arg_count, new_target, constructor,
792                                allocation_feedback.value(), first_arg));
793     Goto(&end);
794   }
795 
796   Bind(&extra_checks);
797   {
798     Label check_allocation_site(this), check_initialized(this),
799         initialize(this), mark_megamorphic(this);
800 
801     // Check if it is a megamorphic target.
802     Comment("check if megamorphic");
803     Node* is_megamorphic =
804         WordEqual(feedback_element,
805                   HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
806     GotoIf(is_megamorphic, &call_construct_function);
807 
808     Comment("check if weak cell");
809     Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
810                                    LoadRoot(Heap::kWeakCellMapRootIndex));
811     GotoIfNot(is_weak_cell, &check_allocation_site);
812 
813     // If the weak cell is cleared, we have a new chance to become
814     // monomorphic.
815     Comment("check if weak cell is cleared");
816     Node* is_smi = TaggedIsSmi(feedback_value);
817     Branch(is_smi, &initialize, &mark_megamorphic);
818 
819     Bind(&check_allocation_site);
820     {
821       Comment("check if it is an allocation site");
822       Node* is_allocation_site =
823           WordEqual(LoadObjectField(feedback_element, 0),
824                     LoadRoot(Heap::kAllocationSiteMapRootIndex));
825       GotoIfNot(is_allocation_site, &check_initialized);
826 
827       // Make sure the function is the Array() function.
828       Node* context_slot = LoadContextElement(LoadNativeContext(context),
829                                               Context::ARRAY_FUNCTION_INDEX);
830       Node* is_array_function = WordEqual(context_slot, constructor);
831       GotoIfNot(is_array_function, &mark_megamorphic);
832 
833       allocation_feedback.Bind(feedback_element);
834       Goto(&call_construct_function);
835     }
836 
837     Bind(&check_initialized);
838     {
839       // Check if it is uninitialized.
840       Comment("check if uninitialized");
841       Node* is_uninitialized = WordEqual(
842           feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
843       Branch(is_uninitialized, &initialize, &mark_megamorphic);
844     }
845 
846     Bind(&initialize);
847     {
848       Label create_allocation_site(this), create_weak_cell(this);
849       Comment("initialize the feedback element");
850       // Create an allocation site if the function is an array function,
851       // otherwise create a weak cell.
852       Node* context_slot = LoadContextElement(LoadNativeContext(context),
853                                               Context::ARRAY_FUNCTION_INDEX);
854       Node* is_array_function = WordEqual(context_slot, constructor);
855       Branch(is_array_function, &create_allocation_site, &create_weak_cell);
856 
857       Bind(&create_allocation_site);
858       {
859         Node* site = CreateAllocationSiteInFeedbackVector(feedback_vector,
860                                                           SmiTag(slot_id));
861         allocation_feedback.Bind(site);
862         Goto(&call_construct_function);
863       }
864 
865       Bind(&create_weak_cell);
866       {
867         CreateWeakCellInFeedbackVector(feedback_vector, SmiTag(slot_id),
868                                        constructor);
869         Goto(&call_construct_function);
870       }
871     }
872 
873     Bind(&mark_megamorphic);
874     {
875       // MegamorphicSentinel is an immortal immovable object so
876       // write-barrier is not needed.
877       Comment("transition to megamorphic");
878       DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
879       StoreFixedArrayElement(
880           feedback_vector, slot_id,
881           HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
882           SKIP_WRITE_BARRIER);
883       Goto(&call_construct_function);
884     }
885   }
886 
887   Bind(&call_construct);
888   {
889     Comment("call using Construct builtin");
890     Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(
891         isolate(), InterpreterPushArgsMode::kOther);
892     Node* code_target = HeapConstant(callable.code());
893     return_value.Bind(CallStub(callable.descriptor(), code_target, context,
894                                arg_count, new_target, constructor,
895                                UndefinedConstant(), first_arg));
896     Goto(&end);
897   }
898 
899   Bind(&end);
900   return return_value.value();
901 }
902 
903 Node* InterpreterAssembler::ConstructWithSpread(Node* constructor,
904                                                 Node* context, Node* new_target,
905                                                 Node* first_arg,
906                                                 Node* arg_count) {
907   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
908   Variable return_value(this, MachineRepresentation::kTagged);
909   Comment("call using ConstructWithSpread");
910   Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(
911       isolate(), InterpreterPushArgsMode::kWithFinalSpread);
912   Node* code_target = HeapConstant(callable.code());
913   return_value.Bind(CallStub(callable.descriptor(), code_target, context,
914                              arg_count, new_target, constructor,
915                              UndefinedConstant(), first_arg));
916 
917   return return_value.value();
918 }
919 
920 Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
921                                          Node* first_arg, Node* arg_count,
922                                          int result_size) {
923   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
924   DCHECK(Bytecodes::IsCallRuntime(bytecode_));
925   Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
926   Node* code_target = HeapConstant(callable.code());
927 
928   // Get the function entry from the function id.
929   Node* function_table = ExternalConstant(
930       ExternalReference::runtime_function_table_address(isolate()));
931   Node* function_offset =
932       Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
933   Node* function =
934       IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
935   Node* function_entry =
936       Load(MachineType::Pointer(), function,
937            IntPtrConstant(offsetof(Runtime::Function, entry)));
938 
939   return CallStubR(callable.descriptor(), result_size, code_target, context,
940                    arg_count, first_arg, function_entry);
941 }
942 
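// Adjusts the interrupt budget stored on the BytecodeArray by |weight| and,
// if the budget drops below zero, performs an interrupt check and resets it.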
943 void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
944   Label ok(this), interrupt_check(this, Label::kDeferred), end(this);
945   Node* budget_offset =
946       IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
947 
948   // Update budget by |weight| and check if it reaches zero.
949   Variable new_budget(this, MachineRepresentation::kWord32);
950   Node* old_budget =
951       Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
952   if (backward) {
953     new_budget.Bind(Int32Sub(old_budget, weight));
954   } else {
955     new_budget.Bind(Int32Add(old_budget, weight));
956   }
957   Node* condition =
958       Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
959   Branch(condition, &ok, &interrupt_check);
960 
961   // Perform interrupt and reset budget.
962   Bind(&interrupt_check);
963   {
964     CallRuntime(Runtime::kInterrupt, GetContext());
965     new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
966     Goto(&ok);
967   }
968 
969   // Update budget.
970   Bind(&ok);
971   StoreNoWriteBarrier(MachineRepresentation::kWord32,
972                       BytecodeArrayTaggedPointer(), budget_offset,
973                       new_budget.value());
974 }
975 
976 Node* InterpreterAssembler::Advance() {
977   return Advance(Bytecodes::Size(bytecode_, operand_scale_));
978 }
979 
980 Node* InterpreterAssembler::Advance(int delta) {
981   return Advance(IntPtrConstant(delta));
982 }
983 
984 Node* InterpreterAssembler::Advance(Node* delta, bool backward) {
985   if (FLAG_trace_ignition) {
986     TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
987   }
988   Node* next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
989                                : IntPtrAdd(BytecodeOffset(), delta);
990   bytecode_offset_.Bind(next_offset);
991   return next_offset;
992 }
993 
994 Node* InterpreterAssembler::Jump(Node* delta, bool backward) {
995   DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
996 
997   UpdateInterruptBudget(TruncateWordToWord32(delta), backward);
998   Node* new_bytecode_offset = Advance(delta, backward);
999   Node* target_bytecode = LoadBytecode(new_bytecode_offset);
1000   return DispatchToBytecode(target_bytecode, new_bytecode_offset);
1001 }
1002 
1003 Node* InterpreterAssembler::Jump(Node* delta) { return Jump(delta, false); }
1004 
1005 Node* InterpreterAssembler::JumpBackward(Node* delta) {
1006   return Jump(delta, true);
1007 }
1008 
1009 void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
1010   Label match(this), no_match(this);
1011 
1012   Branch(condition, &match, &no_match);
1013   Bind(&match);
1014   Jump(delta);
1015   Bind(&no_match);
1016   Dispatch();
1017 }
1018 
1019 void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
1020   JumpConditional(WordEqual(lhs, rhs), delta);
1021 }
1022 
1023 void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
1024                                               Node* delta) {
1025   JumpConditional(WordNotEqual(lhs, rhs), delta);
1026 }
1027 
1028 Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) {
1029   Node* bytecode =
1030       Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
1031   return ChangeUint32ToWord(bytecode);
1032 }
1033 
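// Peeks at the next bytecode; if it is a Star, executes it inline and returns
// the bytecode following it so the extra dispatch is skipped.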
1034 Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
1035   Label do_inline_star(this), done(this);
1036 
1037   Variable var_bytecode(this, MachineType::PointerRepresentation());
1038   var_bytecode.Bind(target_bytecode);
1039 
1040   Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
1041   Node* is_star = WordEqual(target_bytecode, star_bytecode);
1042   Branch(is_star, &do_inline_star, &done);
1043 
1044   Bind(&do_inline_star);
1045   {
1046     InlineStar();
1047     var_bytecode.Bind(LoadBytecode(BytecodeOffset()));
1048     Goto(&done);
1049   }
1050   Bind(&done);
1051   return var_bytecode.value();
1052 }
1053 
1054 void InterpreterAssembler::InlineStar() {
1055   Bytecode previous_bytecode = bytecode_;
1056   AccumulatorUse previous_acc_use = accumulator_use_;
1057 
1058   bytecode_ = Bytecode::kStar;
1059   accumulator_use_ = AccumulatorUse::kNone;
1060 
1061   if (FLAG_trace_ignition) {
1062     TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
1063   }
1064   StoreRegister(GetAccumulator(), BytecodeOperandReg(0));
1065 
1066   DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
1067 
1068   Advance();
1069   bytecode_ = previous_bytecode;
1070   accumulator_use_ = previous_acc_use;
1071 }
1072 
1073 Node* InterpreterAssembler::Dispatch() {
1074   Comment("========= Dispatch");
1075   DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
1076   Node* target_offset = Advance();
1077   Node* target_bytecode = LoadBytecode(target_offset);
1078 
1079   if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
1080     target_bytecode = StarDispatchLookahead(target_bytecode);
1081   }
1082   return DispatchToBytecode(target_bytecode, BytecodeOffset());
1083 }
1084 
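// Loads the handler entry for |target_bytecode| from the dispatch table and
// tail-calls it with the interpreter state at |new_bytecode_offset|.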
1085 Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
1086                                                Node* new_bytecode_offset) {
1087   if (FLAG_trace_ignition_dispatches) {
1088     TraceBytecodeDispatch(target_bytecode);
1089   }
1090 
1091   Node* target_code_entry =
1092       Load(MachineType::Pointer(), DispatchTableRawPointer(),
1093            WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
1094 
1095   return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
1096 }
1097 
1098 Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
1099                                                       Node* bytecode_offset) {
1100   // TODO(ishell): Add CSA::CodeEntryPoint(code).
1101   Node* handler_entry =
1102       IntPtrAdd(BitcastTaggedToWord(handler),
1103                 IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
1104   return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
1105 }
1106 
1107 Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
1108     Node* handler_entry, Node* bytecode_offset) {
1109   InterpreterDispatchDescriptor descriptor(isolate());
1110   return TailCallBytecodeDispatch(
1111       descriptor, handler_entry, GetAccumulatorUnchecked(), bytecode_offset,
1112       BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
1113 }
1114 
1115 void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
1116   // Dispatching a wide bytecode requires treating the prefix
1117   // bytecode as a base pointer into the dispatch table and dispatching
1118   // the bytecode that follows relative to this base.
1119   //
1120   //   Indices 0-255 correspond to bytecodes with operand_scale == 0
1121   //   Indices 256-511 correspond to bytecodes with operand_scale == 1
1122   //   Indices 512-767 correspond to bytecodes with operand_scale == 2
1123   DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
1124   Node* next_bytecode_offset = Advance(1);
1125   Node* next_bytecode = LoadBytecode(next_bytecode_offset);
1126 
1127   if (FLAG_trace_ignition_dispatches) {
1128     TraceBytecodeDispatch(next_bytecode);
1129   }
1130 
1131   Node* base_index;
1132   switch (operand_scale) {
1133     case OperandScale::kDouble:
1134       base_index = IntPtrConstant(1 << kBitsPerByte);
1135       break;
1136     case OperandScale::kQuadruple:
1137       base_index = IntPtrConstant(2 << kBitsPerByte);
1138       break;
1139     default:
1140       UNREACHABLE();
1141       base_index = nullptr;
1142   }
1143   Node* target_index = IntPtrAdd(base_index, next_bytecode);
1144   Node* target_code_entry =
1145       Load(MachineType::Pointer(), DispatchTableRawPointer(),
1146            WordShl(target_index, kPointerSizeLog2));
1147 
1148   DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
1149 }
1150 
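// Truncates a tagged |value| to a word32 while recording binary operation
// feedback; non-number inputs are converted via ToNumber and the loop retried.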
1151 Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
1152     Node* context, Node* value, Variable* var_type_feedback) {
1153   // We might need to loop once due to ToNumber conversion.
1154   Variable var_value(this, MachineRepresentation::kTagged),
1155       var_result(this, MachineRepresentation::kWord32);
1156   Variable* loop_vars[] = {&var_value, var_type_feedback};
1157   Label loop(this, 2, loop_vars), done_loop(this, &var_result);
1158   var_value.Bind(value);
1159   var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNone));
1160   Goto(&loop);
1161   Bind(&loop);
1162   {
1163     // Load the current {value}.
1164     value = var_value.value();
1165 
1166     // Check if the {value} is a Smi or a HeapObject.
1167     Label if_valueissmi(this), if_valueisnotsmi(this);
1168     Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
1169 
1170     Bind(&if_valueissmi);
1171     {
1172       // Convert the Smi {value}.
1173       var_result.Bind(SmiToWord32(value));
1174       var_type_feedback->Bind(
1175           SmiOr(var_type_feedback->value(),
1176                 SmiConstant(BinaryOperationFeedback::kSignedSmall)));
1177       Goto(&done_loop);
1178     }
1179 
1180     Bind(&if_valueisnotsmi);
1181     {
1182       // Check if {value} is a HeapNumber.
1183       Label if_valueisheapnumber(this),
1184           if_valueisnotheapnumber(this, Label::kDeferred);
1185       Node* value_map = LoadMap(value);
1186       Branch(IsHeapNumberMap(value_map), &if_valueisheapnumber,
1187              &if_valueisnotheapnumber);
1188 
1189       Bind(&if_valueisheapnumber);
1190       {
1191         // Truncate the floating point value.
1192         var_result.Bind(TruncateHeapNumberValueToWord32(value));
1193         var_type_feedback->Bind(
1194             SmiOr(var_type_feedback->value(),
1195                   SmiConstant(BinaryOperationFeedback::kNumber)));
1196         Goto(&done_loop);
1197       }
1198 
1199       Bind(&if_valueisnotheapnumber);
1200       {
1201         // We do not require an Or with earlier feedback here because once we
1202         // convert the value to a number, we cannot reach this path. We can
1203         // only reach this path on the first pass when the feedback is kNone.
1204         CSA_ASSERT(this, SmiEqual(var_type_feedback->value(),
1205                                   SmiConstant(BinaryOperationFeedback::kNone)));
1206 
1207         Label if_valueisoddball(this),
1208             if_valueisnotoddball(this, Label::kDeferred);
1209         Node* is_oddball = Word32Equal(LoadMapInstanceType(value_map),
1210                                        Int32Constant(ODDBALL_TYPE));
1211         Branch(is_oddball, &if_valueisoddball, &if_valueisnotoddball);
1212 
1213         Bind(&if_valueisoddball);
1214         {
1215           // Convert Oddball to a Number and perform checks again.
1216           var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
1217           var_type_feedback->Bind(
1218               SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
1219           Goto(&loop);
1220         }
1221 
1222         Bind(&if_valueisnotoddball);
1223         {
1224           // Convert the {value} to a Number first.
1225           Callable callable = CodeFactory::NonNumberToNumber(isolate());
1226           var_value.Bind(CallStub(callable, context, value));
1227           var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
1228           Goto(&loop);
1229         }
1230       }
1231     }
1232   }
1233   Bind(&done_loop);
1234   return var_result.value();
1235 }
1236 
1237 void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
1238   // TODO(rmcilroy): Investigate whether it is worth supporting self
1239   // optimization of primitive functions like FullCodegen.
1240 
1241   // Update profiling count by -BytecodeOffset to simulate backedge to start of
1242   // function.
1243   Node* profiling_weight =
1244       Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize),
1245                TruncateWordToWord32(BytecodeOffset()));
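  // BytecodeOffset() includes the BytecodeArray header, so this computes
  // (approximately) the negative of the offset within the bytecode stream,
  // which serves as the weight for the simulated backedge.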
1246   UpdateInterruptBudget(profiling_weight, false);
1247 }
1248 
1249 Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
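  // The stack guard requests interrupts by setting the stack limit above the
  // current stack pointer, so this comparison is also true when an interrupt
  // is pending, not only on an actual stack overflow.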
1250   Node* sp = LoadStackPointer();
1251   Node* stack_limit = Load(
1252       MachineType::Pointer(),
1253       ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
1254   return UintPtrLessThan(sp, stack_limit);
1255 }
1256 
1257 Node* InterpreterAssembler::LoadOSRNestingLevel() {
1258   return LoadObjectField(BytecodeArrayTaggedPointer(),
1259                          BytecodeArray::kOSRNestingLevelOffset,
1260                          MachineType::Int8());
1261 }
1262 
1263 void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
1264   disable_stack_check_across_call_ = true;
1265   Node* abort_id = SmiTag(Int32Constant(bailout_reason));
1266   CallRuntime(Runtime::kAbort, GetContext(), abort_id);
1267   disable_stack_check_across_call_ = false;
1268 }
1269 
1270 void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
1271                                                BailoutReason bailout_reason) {
1272   Label ok(this), abort(this, Label::kDeferred);
1273   Branch(WordEqual(lhs, rhs), &ok, &abort);
1274 
1275   Bind(&abort);
1276   Abort(bailout_reason);
1277   Goto(&ok);
1278 
1279   Bind(&ok);
1280 }
1281 
1282 void InterpreterAssembler::MaybeDropFrames(Node* context) {
1283   Node* restart_fp_address =
1284       ExternalConstant(ExternalReference::debug_restart_fp_address(isolate()));
1285 
1286   Node* restart_fp = Load(MachineType::Pointer(), restart_fp_address);
1287   Node* null = IntPtrConstant(0);
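  // A zero restart frame pointer means the debugger has not requested a frame
  // restart; otherwise it holds the frame pointer of the frame to restart.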
1288 
1289   Label ok(this), drop_frames(this);
1290   Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames);
1291 
1292   Bind(&drop_frames);
1293   // We don't expect this call to return since the frame dropper tears down
1294   // the stack and jumps into the function on the target frame to restart it.
1295   CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
1296   Abort(kUnexpectedReturnFromFrameDropper);
1297   Goto(&ok);
1298 
1299   Bind(&ok);
1300 }
1301 
1302 void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
1303   CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
1304               SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
1305 }
1306 
1307 void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
1308   Node* counters_table = ExternalConstant(
1309       ExternalReference::interpreter_dispatch_counters(isolate()));
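  // The dispatch counters form a flat (kLast + 1) x (kLast + 1) matrix of
  // uintptr_t counters: the row is selected by the currently executing
  // bytecode and the column by the target bytecode being dispatched to.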
1310   Node* source_bytecode_table_index = IntPtrConstant(
1311       static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));
1312 
1313   Node* counter_offset =
1314       WordShl(IntPtrAdd(source_bytecode_table_index, target_bytecode),
1315               IntPtrConstant(kPointerSizeLog2));
1316   Node* old_counter =
1317       Load(MachineType::IntPtr(), counters_table, counter_offset);
1318 
1319   Label counter_ok(this), counter_saturated(this, Label::kDeferred);
1320 
1321   Node* counter_reached_max = WordEqual(
1322       old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
1323   Branch(counter_reached_max, &counter_saturated, &counter_ok);
1324 
1325   Bind(&counter_ok);
1326   {
1327     Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
1328     StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
1329                         counter_offset, new_counter);
1330     Goto(&counter_saturated);
1331   }
1332 
1333   Bind(&counter_saturated);
1334 }
1335 
1336 // static
1337 bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
1338 #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
1339   return false;
1340 #elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87 || \
1341     V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
1342     V8_TARGET_ARCH_PPC
1343   return true;
1344 #else
1345 #error "Unknown Architecture"
1346 #endif
1347 }
1348 
1349 Node* InterpreterAssembler::RegisterCount() {
1350   Node* bytecode_array = LoadRegister(Register::bytecode_array());
1351   Node* frame_size = LoadObjectField(
1352       bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Uint32());
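  // The frame size is stored in bytes; dividing by the pointer size gives the
  // number of interpreter registers in the frame.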
1353   return WordShr(ChangeUint32ToWord(frame_size),
1354                  IntPtrConstant(kPointerSizeLog2));
1355 }
1356 
1357 Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
1358   Node* register_count = RegisterCount();
1359   if (FLAG_debug_code) {
1360     Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
1361     AbortIfWordNotEqual(array_size, register_count,
1362                         kInvalidRegisterFileInGenerator);
1363   }
1364 
1365   Variable var_index(this, MachineType::PointerRepresentation());
1366   var_index.Bind(IntPtrConstant(0));
1367 
1368   // Iterate over the register file and write its values into the array.
1369   // The mapping of register to array index must match that used in
1370   // BytecodeGraphBuilder::VisitResumeGenerator.
1371   Label loop(this, &var_index), done_loop(this);
1372   Goto(&loop);
1373   Bind(&loop);
1374   {
1375     Node* index = var_index.value();
1376     GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
1377 
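    // The operand of register i is Register(0).ToOperand() - i, so this maps
    // array index i to interpreter register i.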
1378     Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
1379     Node* value = LoadRegister(reg_index);
1380 
1381     StoreFixedArrayElement(array, index, value);
1382 
1383     var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
1384     Goto(&loop);
1385   }
1386   Bind(&done_loop);
1387 
1388   return array;
1389 }
1390 
1391 Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
1392   Node* register_count = RegisterCount();
1393   if (FLAG_debug_code) {
1394     Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
1395     AbortIfWordNotEqual(array_size, register_count,
1396                         kInvalidRegisterFileInGenerator);
1397   }
1398 
1399   Variable var_index(this, MachineType::PointerRepresentation());
1400   var_index.Bind(IntPtrConstant(0));
1401 
1402   // Iterate over the array and write its values into the register file.
1403   // Also erase the array contents so they are not kept alive artificially.
1404   Label loop(this, &var_index), done_loop(this);
1405   Goto(&loop);
1406   Bind(&loop);
1407   {
1408     Node* index = var_index.value();
1409     GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
1410 
1411     Node* value = LoadFixedArrayElement(array, index);
1412 
1413     Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
1414     StoreRegister(value, reg_index);
1415 
1416     StoreFixedArrayElement(array, index, StaleRegisterConstant());
1417 
1418     var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
1419     Goto(&loop);
1420   }
1421   Bind(&done_loop);
1422 
1423   return array;
1424 }
1425 
1426 }  // namespace interpreter
1427 }  // namespace internal
1428 }  // namespace v8
1429