• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/interpreter/interpreter-assembler.h"
6 
7 #include <limits>
8 #include <ostream>
9 
10 #include "src/codegen/code-factory.h"
11 #include "src/codegen/interface-descriptors.h"
12 #include "src/codegen/machine-type.h"
13 #include "src/execution/frames.h"
14 #include "src/interpreter/bytecodes.h"
15 #include "src/interpreter/interpreter.h"
16 #include "src/objects/objects-inl.h"
17 #include "src/zone/zone.h"
18 
19 namespace v8 {
20 namespace internal {
21 namespace interpreter {
22 
23 using compiler::CodeAssemblerState;
24 
// Sets up the per-bytecode-handler assembler state: caches the dispatch
// parameters (bytecode array, bytecode offset, dispatch table, accumulator)
// in TVARIABLEs and registers prologue/epilogue hooks that run around every
// generated call.
InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
                                           Bytecode bytecode,
                                           OperandScale operand_scale)
    : CodeStubAssembler(state),
      bytecode_(bytecode),
      operand_scale_(operand_scale),
      // interpreted_frame_pointer_ is left unbound; it is loaded lazily in
      // GetInterpretedFramePointer().
      TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_),
      TVARIABLE_CONSTRUCTOR(bytecode_array_,
                            Parameter<BytecodeArray>(
                                InterpreterDispatchDescriptor::kBytecodeArray)),
      TVARIABLE_CONSTRUCTOR(
          bytecode_offset_,
          UncheckedParameter<IntPtrT>(
              InterpreterDispatchDescriptor::kBytecodeOffset)),
      TVARIABLE_CONSTRUCTOR(dispatch_table_,
                            UncheckedParameter<ExternalReference>(
                                InterpreterDispatchDescriptor::kDispatchTable)),
      TVARIABLE_CONSTRUCTOR(
          accumulator_,
          Parameter<Object>(InterpreterDispatchDescriptor::kAccumulator)),
      accumulator_use_(AccumulatorUse::kNone),
      made_call_(false),
      reloaded_frame_ptr_(false),
      bytecode_array_valid_(true) {
#ifdef V8_TRACE_IGNITION
  TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
#endif
  RegisterCallGenerationCallbacks([this] { CallPrologue(); },
                                  [this] { CallEpilogue(); });

  // Save the bytecode offset immediately if bytecode will make a call along
  // the critical path, or it is a return bytecode.
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
      Bytecodes::Returns(bytecode)) {
    SaveBytecodeOffset();
  }
}
62 
// Validates accumulator usage for the handler and tears down the call
// generation callbacks installed in the constructor.
InterpreterAssembler::~InterpreterAssembler() {
  // If the following check fails the handler does not use the
  // accumulator in the way described in the bytecode definitions in
  // bytecodes.h.
  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
  UnregisterCallGenerationCallbacks();
}
70 
GetInterpretedFramePointer()71 TNode<RawPtrT> InterpreterAssembler::GetInterpretedFramePointer() {
72   if (!interpreted_frame_pointer_.IsBound()) {
73     interpreted_frame_pointer_ = LoadParentFramePointer();
74   } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
75              !reloaded_frame_ptr_) {
76     interpreted_frame_pointer_ = LoadParentFramePointer();
77     reloaded_frame_ptr_ = true;
78   }
79   return interpreted_frame_pointer_.value();
80 }
81 
// Returns the current bytecode offset. After a call along the critical path
// the incoming dispatch parameter may be stale; if the variable still holds
// that raw parameter (this compares CSA node identity, not runtime values),
// the offset is reloaded from the frame slot written by SaveBytecodeOffset().
TNode<IntPtrT> InterpreterAssembler::BytecodeOffset() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (bytecode_offset_.value() ==
       UncheckedParameter<IntPtrT>(
           InterpreterDispatchDescriptor::kBytecodeOffset))) {
    bytecode_offset_ = ReloadBytecodeOffset();
  }
  return bytecode_offset_.value();
}
91 
ReloadBytecodeOffset()92 TNode<IntPtrT> InterpreterAssembler::ReloadBytecodeOffset() {
93   TNode<IntPtrT> offset = LoadAndUntagRegister(Register::bytecode_offset());
94   if (operand_scale() != OperandScale::kSingle) {
95     // Add one to the offset such that it points to the actual bytecode rather
96     // than the Wide / ExtraWide prefix bytecode.
97     offset = IntPtrAdd(offset, IntPtrConstant(1));
98   }
99   return offset;
100 }
101 
SaveBytecodeOffset()102 void InterpreterAssembler::SaveBytecodeOffset() {
103   TNode<IntPtrT> bytecode_offset = BytecodeOffset();
104   if (operand_scale() != OperandScale::kSingle) {
105     // Subtract one from the bytecode_offset such that it points to the Wide /
106     // ExtraWide prefix bytecode.
107     bytecode_offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
108   }
109   int store_offset =
110       Register::bytecode_offset().ToOperand() * kSystemPointerSize;
111   TNode<RawPtrT> base = GetInterpretedFramePointer();
112 
113   if (SmiValuesAre32Bits()) {
114     int zero_offset = store_offset + 4;
115     int payload_offset = store_offset;
116 #if V8_TARGET_LITTLE_ENDIAN
117     std::swap(zero_offset, payload_offset);
118 #endif
119     StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
120                         IntPtrConstant(zero_offset), Int32Constant(0));
121     StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
122                         IntPtrConstant(payload_offset),
123                         TruncateIntPtrToInt32(bytecode_offset));
124   } else {
125     StoreFullTaggedNoWriteBarrier(base, IntPtrConstant(store_offset),
126                                   SmiTag(bytecode_offset));
127   }
128 }
129 
// Returns the (tagged) BytecodeArray for the current function, re-loading it
// from its frame register when the cached node has been invalidated.
TNode<BytecodeArray> InterpreterAssembler::BytecodeArrayTaggedPointer() {
  // Force a re-load of the bytecode array after every call in case the
  // debugger has been activated (CallEpilogue clears bytecode_array_valid_).
  if (!bytecode_array_valid_) {
    bytecode_array_ = CAST(LoadRegister(Register::bytecode_array()));
    bytecode_array_valid_ = true;
  }
  return bytecode_array_.value();
}
139 
// Returns the dispatch table pointer. Like BytecodeOffset(), if a call was
// made on the critical path and the variable still holds the raw dispatch
// parameter (CSA node identity comparison), re-derive it from the isolate.
TNode<ExternalReference> InterpreterAssembler::DispatchTablePointer() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (dispatch_table_.value() ==
       UncheckedParameter<ExternalReference>(
           InterpreterDispatchDescriptor::kDispatchTable))) {
    dispatch_table_ = ExternalConstant(
        ExternalReference::interpreter_dispatch_table_address(isolate()));
  }
  return dispatch_table_.value();
}
150 
// Returns the accumulator without recording an accumulator read and without
// speculation poisoning; prefer GetAccumulator() in handlers.
TNode<Object> InterpreterAssembler::GetAccumulatorUnchecked() {
  return accumulator_.value();
}
154 
// Returns the accumulator, recording the read (checked against bytecodes.h
// in the destructor) and applying speculation poisoning.
TNode<Object> InterpreterAssembler::GetAccumulator() {
  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
  return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
}
160 
// Sets the accumulator, recording the write for the destructor's
// accumulator-use check.
void InterpreterAssembler::SetAccumulator(TNode<Object> value) {
  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
  accumulator_ = value;
}
166 
// Loads the current context from its interpreter frame register.
TNode<Context> InterpreterAssembler::GetContext() {
  return CAST(LoadRegister(Register::current_context()));
}
170 
// Stores |value| as the current context in its interpreter frame register.
void InterpreterAssembler::SetContext(TNode<Context> value) {
  StoreRegister(value, Register::current_context());
}
174 
// Walks |depth| steps up the context chain via PREVIOUS_INDEX links and
// returns the context reached. depth == 0 returns |context| itself.
TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
                                                       TNode<Uint32T> depth) {
  TVARIABLE(Context, cur_context, context);
  TVARIABLE(Uint32T, cur_depth, depth);

  Label context_found(this);

  // Loop variables must be listed so their values merge across iterations.
  Label context_search(this, {&cur_depth, &cur_context});

  // Fast path if the depth is 0.
  Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);

  // Loop until the depth is 0.
  BIND(&context_search);
  {
    cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context =
        CAST(LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
           &context_search);
  }

  BIND(&context_found);
  return cur_context.value();
}
201 
// Jumps to |target| if any context between |context| and |depth| levels up
// the chain carries a non-undefined extension object; otherwise falls
// through. Contexts whose scope info has no extension field are skipped.
void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(
    TNode<Context> context, TNode<Uint32T> depth, Label* target) {
  TVARIABLE(Context, cur_context, context);
  TVARIABLE(Uint32T, cur_depth, depth);

  // Loop variables listed so their values merge across iterations.
  Label context_search(this, {&cur_depth, &cur_context});
  Label no_extension(this);

  // Loop until the depth is 0.
  Goto(&context_search);
  BIND(&context_search);
  {
    // Check if context has an extension slot.
    TNode<BoolT> has_extension =
        LoadScopeInfoHasExtensionField(LoadScopeInfo(cur_context.value()));
    GotoIfNot(has_extension, &no_extension);

    // Jump to the target if the extension slot is not an undefined value.
    TNode<Object> extension_slot =
        LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
    Branch(TaggedNotEqual(extension_slot, UndefinedConstant()), target,
           &no_extension);

    BIND(&no_extension);
    {
      cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
      cur_context = CAST(
          LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

      GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
             &context_search);
    }
  }
}
236 
// Computes the address of interpreter register |reg_index| within the
// interpreted frame. The result is poisoned on speculation since it is
// derived from bytecode-controlled data.
TNode<IntPtrT> InterpreterAssembler::RegisterLocation(
    TNode<IntPtrT> reg_index) {
  return Signed(WordPoisonOnSpeculation(
      IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index))));
}
242 
RegisterLocation(Register reg)243 TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Register reg) {
244   return RegisterLocation(IntPtrConstant(reg.ToOperand()));
245 }
246 
// Converts a register operand index into a byte offset from the interpreted
// frame pointer.
TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(TNode<IntPtrT> index) {
  return TimesSystemPointerSize(index);
}
250 
// Loads the value of interpreter register |reg_index| from the frame. The
// load is marked kCritical because the index comes from bytecode operands.
TNode<Object> InterpreterAssembler::LoadRegister(TNode<IntPtrT> reg_index) {
  return LoadFullTagged(GetInterpretedFramePointer(),
                        RegisterFrameOffset(reg_index),
                        LoadSensitivity::kCritical);
}
256 
// Loads the value of a statically-known interpreter register; the offset is
// a compile-time constant, so no poisoning is needed.
TNode<Object> InterpreterAssembler::LoadRegister(Register reg) {
  return LoadFullTagged(GetInterpretedFramePointer(),
                        IntPtrConstant(reg.ToOperand() * kSystemPointerSize));
}
261 
// Loads a Smi from a statically-known register and untags it to an IntPtrT.
TNode<IntPtrT> InterpreterAssembler::LoadAndUntagRegister(Register reg) {
  TNode<RawPtrT> base = GetInterpretedFramePointer();
  int index = reg.ToOperand() * kSystemPointerSize;
  if (SmiValuesAre32Bits()) {
    // 32-bit Smi payload: load only the payload half of the word; on
    // little-endian targets that is the upper 4 bytes of the slot.
#if V8_TARGET_LITTLE_ENDIAN
    index += 4;
#endif
    return ChangeInt32ToIntPtr(Load<Int32T>(base, IntPtrConstant(index)));
  } else {
    return SmiToIntPtr(CAST(LoadFullTagged(base, IntPtrConstant(index))));
  }
}
274 
LoadRegisterAtOperandIndex(int operand_index)275 TNode<Object> InterpreterAssembler::LoadRegisterAtOperandIndex(
276     int operand_index) {
277   return LoadRegister(
278       BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
279 }
280 
281 std::pair<TNode<Object>, TNode<Object>>
LoadRegisterPairAtOperandIndex(int operand_index)282 InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) {
283   DCHECK_EQ(OperandType::kRegPair,
284             Bytecodes::GetOperandType(bytecode_, operand_index));
285   TNode<IntPtrT> first_reg_index =
286       BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
287   TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
288   return std::make_pair(LoadRegister(first_reg_index),
289                         LoadRegister(second_reg_index));
290 }
291 
// Builds a RegListNodePair from a register-list operand: the base register's
// frame location plus the register count from the following kRegCount
// operand.
InterpreterAssembler::RegListNodePair
InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
  DCHECK(Bytecodes::IsRegisterListOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  // The count lives in the next operand slot.
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index + 1));
  TNode<IntPtrT> base_reg = RegisterLocation(
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
  TNode<Uint32T> reg_count = BytecodeOperandCount(operand_index + 1);
  return RegListNodePair(base_reg, reg_count);
}
303 
// Loads the |index|-th register value from a register list.
TNode<Object> InterpreterAssembler::LoadRegisterFromRegisterList(
    const RegListNodePair& reg_list, int index) {
  TNode<IntPtrT> location = RegisterLocationInRegisterList(reg_list, index);
  // Location is already poisoned on speculation, so no need to poison here.
  return LoadFullTagged(location);
}
310 
// Computes the frame address of the |index|-th register in a register list.
TNode<IntPtrT> InterpreterAssembler::RegisterLocationInRegisterList(
    const RegListNodePair& reg_list, int index) {
  CSA_ASSERT(this,
             Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
  TNode<IntPtrT> offset = RegisterFrameOffset(IntPtrConstant(index));
  // Register indexes are negative, so subtract index from base location to get
  // location.
  return Signed(IntPtrSub(reg_list.base_reg_location(), offset));
}
320 
// Stores |value| into a statically-known interpreter register. No write
// barrier: interpreter frame slots are stack memory, not heap objects.
void InterpreterAssembler::StoreRegister(TNode<Object> value, Register reg) {
  StoreFullTaggedNoWriteBarrier(
      GetInterpretedFramePointer(),
      IntPtrConstant(reg.ToOperand() * kSystemPointerSize), value);
}
326 
// Stores |value| into the interpreter register with dynamic index
// |reg_index|. No write barrier: the frame lives on the stack.
void InterpreterAssembler::StoreRegister(TNode<Object> value,
                                         TNode<IntPtrT> reg_index) {
  StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(),
                                RegisterFrameOffset(reg_index), value);
}
332 
StoreRegisterAtOperandIndex(TNode<Object> value,int operand_index)333 void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode<Object> value,
334                                                        int operand_index) {
335   StoreRegister(value,
336                 BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
337 }
338 
// Stores |value1| and |value2| into the two consecutive registers named by
// the kRegOutPair operand at |operand_index|.
void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1,
                                                           TNode<Object> value2,
                                                           int operand_index) {
  DCHECK_EQ(OperandType::kRegOutPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  StoreRegister(value1, first_reg_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
}
350 
// Stores |value1|..|value3| into the three consecutive registers named by
// the kRegOutTriple operand at |operand_index|.
void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
    TNode<Object> value1, TNode<Object> value2, TNode<Object> value3,
    int operand_index) {
  DCHECK_EQ(OperandType::kRegOutTriple,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  StoreRegister(value1, first_reg_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
  TNode<IntPtrT> third_reg_index = NextRegister(second_reg_index);
  StoreRegister(value3, third_reg_index);
}
364 
// Returns the index of the register following |reg_index|.
TNode<IntPtrT> InterpreterAssembler::NextRegister(TNode<IntPtrT> reg_index) {
  // Register indexes are negative, so the next index is minus one.
  return Signed(IntPtrAdd(reg_index, IntPtrConstant(-1)));
}
369 
// Returns the constant byte offset of operand |operand_index| relative to
// the start of the current bytecode, for the current operand scale.
TNode<IntPtrT> InterpreterAssembler::OperandOffset(int operand_index) {
  return IntPtrConstant(
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}
374 
// Reads a one-byte unsigned operand from the bytecode stream.
TNode<Uint8T> InterpreterAssembler::BytecodeOperandUnsignedByte(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
  return Load<Uint8T>(BytecodeArrayTaggedPointer(),
                      IntPtrAdd(BytecodeOffset(), operand_offset),
                      needs_poisoning);
}
385 
// Reads a one-byte signed operand from the bytecode stream.
TNode<Int8T> InterpreterAssembler::BytecodeOperandSignedByte(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
  return Load<Int8T>(BytecodeArrayTaggedPointer(),
                     IntPtrAdd(BytecodeOffset(), operand_offset),
                     needs_poisoning);
}
396 
// Assembles a 16- or 32-bit operand load out of individual byte loads, for
// targets without unaligned memory access. The most significant byte is
// loaded with the result's signedness so sign extension is correct.
TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
    int relative_offset, MachineType result_type,
    LoadSensitivity needs_poisoning) {
  static const int kMaxCount = 4;
  DCHECK(!TargetSupportsUnalignedAccess());

  int count;
  switch (result_type.representation()) {
    case MachineRepresentation::kWord16:
      count = 2;
      break;
    case MachineRepresentation::kWord32:
      count = 4;
      break;
    default:
      UNREACHABLE();
  }
  // Only the MSB load carries the sign; all lower bytes load as unsigned.
  MachineType msb_type =
      result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();

#if V8_TARGET_LITTLE_ENDIAN
  const int kStep = -1;
  int msb_offset = count - 1;
#elif V8_TARGET_BIG_ENDIAN
  const int kStep = 1;
  int msb_offset = 0;
#else
#error "Unknown Architecture"
#endif

  // Read the most significant bytecode into bytes[0] and then in order
  // down to least significant in bytes[count - 1].
  DCHECK_LE(count, kMaxCount);
  TNode<Word32T> bytes[kMaxCount];
  for (int i = 0; i < count; i++) {
    MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
    TNode<IntPtrT> offset =
        IntPtrConstant(relative_offset + msb_offset + i * kStep);
    TNode<IntPtrT> array_offset = IntPtrAdd(BytecodeOffset(), offset);
    bytes[i] =
        UncheckedCast<Word32T>(Load(machine_type, BytecodeArrayTaggedPointer(),
                                    array_offset, needs_poisoning));
  }

  // Pack LSB to MSB.
  TNode<Word32T> result = bytes[--count];
  for (int i = 1; --count >= 0; i++) {
    TNode<Int32T> shift = Int32Constant(i * kBitsPerByte);
    TNode<Word32T> value = Word32Shl(bytes[count], shift);
    result = Word32Or(value, result);
  }
  return result;
}
450 
// Reads a two-byte unsigned operand, falling back to byte-wise assembly on
// targets without unaligned access.
TNode<Uint16T> InterpreterAssembler::BytecodeOperandUnsignedShort(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Uint16T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
        needs_poisoning);
  } else {
    return UncheckedCast<Uint16T>(BytecodeOperandReadUnaligned(
        operand_offset, MachineType::Uint16(), needs_poisoning));
  }
}
469 
// Reads a two-byte signed operand, falling back to byte-wise assembly on
// targets without unaligned access.
TNode<Int16T> InterpreterAssembler::BytecodeOperandSignedShort(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Int16T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
        needs_poisoning);
  } else {
    return UncheckedCast<Int16T>(BytecodeOperandReadUnaligned(
        operand_offset, MachineType::Int16(), needs_poisoning));
  }
}
488 
// Reads a four-byte unsigned operand, falling back to byte-wise assembly on
// targets without unaligned access.
TNode<Uint32T> InterpreterAssembler::BytecodeOperandUnsignedQuad(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Uint32T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
        needs_poisoning);
  } else {
    return UncheckedCast<Uint32T>(BytecodeOperandReadUnaligned(
        operand_offset, MachineType::Uint32(), needs_poisoning));
  }
}
506 
// Reads a four-byte signed operand, falling back to byte-wise assembly on
// targets without unaligned access.
TNode<Int32T> InterpreterAssembler::BytecodeOperandSignedQuad(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Int32T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
        needs_poisoning);
  } else {
    return UncheckedCast<Int32T>(BytecodeOperandReadUnaligned(
        operand_offset, MachineType::Int32(), needs_poisoning));
  }
}
524 
// Dispatches to the signed operand reader matching |operand_size|.
TNode<Int32T> InterpreterAssembler::BytecodeSignedOperand(
    int operand_index, OperandSize operand_size,
    LoadSensitivity needs_poisoning) {
  DCHECK(!Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandSignedByte(operand_index, needs_poisoning);
    case OperandSize::kShort:
      return BytecodeOperandSignedShort(operand_index, needs_poisoning);
    case OperandSize::kQuad:
      return BytecodeOperandSignedQuad(operand_index, needs_poisoning);
    case OperandSize::kNone:
      UNREACHABLE();
  }
}
541 
// Dispatches to the unsigned operand reader matching |operand_size|.
TNode<Uint32T> InterpreterAssembler::BytecodeUnsignedOperand(
    int operand_index, OperandSize operand_size,
    LoadSensitivity needs_poisoning) {
  DCHECK(Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandUnsignedByte(operand_index, needs_poisoning);
    case OperandSize::kShort:
      return BytecodeOperandUnsignedShort(operand_index, needs_poisoning);
    case OperandSize::kQuad:
      return BytecodeOperandUnsignedQuad(operand_index, needs_poisoning);
    case OperandSize::kNone:
      UNREACHABLE();
  }
}
558 
// Reads a kRegCount operand (register-list length) as an unsigned value.
TNode<Uint32T> InterpreterAssembler::BytecodeOperandCount(int operand_index) {
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}
566 
// Reads a kFlag8 operand; flags are always a single byte.
TNode<Uint32T> InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
  DCHECK_EQ(OperandType::kFlag8,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}
575 
// Reads a kUImm operand as an unsigned 32-bit value.
TNode<Uint32T> InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
  DCHECK_EQ(OperandType::kUImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}
583 
// Reads a kUImm operand widened to a machine word.
TNode<UintPtrT> InterpreterAssembler::BytecodeOperandUImmWord(
    int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
}
588 
// Reads a kUImm operand as a Smi.
TNode<Smi> InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
  return SmiFromUint32(BytecodeOperandUImm(operand_index));
}
592 
// Reads a kImm operand as a signed 32-bit value.
TNode<Int32T> InterpreterAssembler::BytecodeOperandImm(int operand_index) {
  DCHECK_EQ(OperandType::kImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeSignedOperand(operand_index, operand_size);
}
600 
// Reads a kImm operand sign-extended to a machine word.
TNode<IntPtrT> InterpreterAssembler::BytecodeOperandImmIntPtr(
    int operand_index) {
  return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
}
605 
// Reads a kImm operand as a Smi.
TNode<Smi> InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
  return SmiFromInt32(BytecodeOperandImm(operand_index));
}
609 
// Reads a kIdx operand as an unsigned 32-bit index.
TNode<Uint32T> InterpreterAssembler::BytecodeOperandIdxInt32(
    int operand_index) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}
618 
// Reads a kIdx operand widened to a machine word.
TNode<UintPtrT> InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index));
}
622 
// Reads a kIdx operand as a Smi.
TNode<Smi> InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
  return SmiTag(Signed(BytecodeOperandIdx(operand_index)));
}
626 
// Reads a kIdx operand as a TaggedIndex.
TNode<TaggedIndex> InterpreterAssembler::BytecodeOperandIdxTaggedIndex(
    int operand_index) {
  TNode<IntPtrT> index =
      ChangeInt32ToIntPtr(Signed(BytecodeOperandIdxInt32(operand_index)));
  return IntPtrToTaggedIndex(index);
}
633 
// Reads a kIdx operand used to index the constant pool, with caller-chosen
// speculation poisoning.
TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning));
}
643 
// Reads a register operand (signed; register indexes are negative) widened
// to a machine word.
TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK(Bytecodes::IsRegisterOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeInt32ToIntPtr(
      BytecodeSignedOperand(operand_index, operand_size, needs_poisoning));
}
653 
// Reads a kRuntimeId operand (runtime function id); always two bytes.
TNode<Uint32T> InterpreterAssembler::BytecodeOperandRuntimeId(
    int operand_index) {
  DCHECK_EQ(OperandType::kRuntimeId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kShort);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}
663 
// Reads a kNativeContextIndex operand widened to a machine word.
TNode<UintPtrT> InterpreterAssembler::BytecodeOperandNativeContextIndex(
    int operand_index) {
  DCHECK_EQ(OperandType::kNativeContextIndex,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size));
}
673 
// Reads a kIntrinsicId operand (intrinsic function id); always one byte.
TNode<Uint32T> InterpreterAssembler::BytecodeOperandIntrinsicId(
    int operand_index) {
  DCHECK_EQ(OperandType::kIntrinsicId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}
683 
LoadConstantPoolEntry(TNode<WordT> index)684 TNode<Object> InterpreterAssembler::LoadConstantPoolEntry(TNode<WordT> index) {
685   TNode<FixedArray> constant_pool = CAST(LoadObjectField(
686       BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
687   return UnsafeLoadFixedArrayElement(constant_pool,
688                                      UncheckedCast<IntPtrT>(index), 0,
689                                      LoadSensitivity::kCritical);
690 }
691 
LoadAndUntagConstantPoolEntry(TNode<WordT> index)692 TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
693     TNode<WordT> index) {
694   return SmiUntag(CAST(LoadConstantPoolEntry(index)));
695 }
696 
LoadConstantPoolEntryAtOperandIndex(int operand_index)697 TNode<Object> InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
698     int operand_index) {
699   TNode<UintPtrT> index =
700       BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe);
701   return LoadConstantPoolEntry(index);
702 }
703 
704 TNode<IntPtrT>
LoadAndUntagConstantPoolEntryAtOperandIndex(int operand_index)705 InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
706     int operand_index) {
707   return SmiUntag(CAST(LoadConstantPoolEntryAtOperandIndex(operand_index)));
708 }
709 
LoadFeedbackVector()710 TNode<HeapObject> InterpreterAssembler::LoadFeedbackVector() {
711   TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
712   return CodeStubAssembler::LoadFeedbackVector(function);
713 }
714 
// Assembler-side bookkeeping performed before emitting a (non-tail) call from
// a bytecode handler. Spills the bytecode offset if this bytecode doesn't
// already do so in its prologue, and invalidates the cached bytecode array
// pointer (presumably because the callee may trigger GC that moves the array
// — TODO confirm; the reload happens lazily elsewhere).
void InterpreterAssembler::CallPrologue() {
  if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) {
    // Bytecodes that make a call along the critical path save the bytecode
    // offset in the bytecode handler's prologue. For other bytecodes, if
    // there are multiple calls in the bytecode handler, you need to spill
    // before each of them, unless SaveBytecodeOffset has explicitly been called
    // in a path that dominates _all_ of those calls (which we don't track).
    SaveBytecodeOffset();
  }

  // Record that a call was made so later code knows the cached state (and
  // the MakesCallAlongCriticalPath DCHECKs) are consistent.
  bytecode_array_valid_ = false;
  made_call_ = true;
}
728 
CallEpilogue()729 void InterpreterAssembler::CallEpilogue() {
730 }
731 
// Pushes the register-list arguments |args| and tail-calls the
// InterpreterPushArgsThenCall builtin to invoke |function|, then re-enters
// the dispatch loop. This is a tail call, so nothing emitted after it in the
// handler executes; the builtin is responsible for writing the call result
// into the accumulator.
void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
    ConvertReceiverMode receiver_mode) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);

  TNode<Word32T> args_count;
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The receiver is implied, so it is not in the argument list.
    args_count = args.reg_count();
  } else {
    // Subtract the receiver from the argument count.
    TNode<Int32T> receiver_count = Int32Constant(1);
    args_count = Int32Sub(args.reg_count(), receiver_count);
  }

  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
      isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
  TNode<Code> code_target = HeapConstant(callable.code());

  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                   args_count, args.base_reg_location(),
                                   function);
  // TailCallStubThenDispatch updates accumulator with result.
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}
760 
// Variant of CallJSAndDispatch for calls whose arguments are individual nodes
// (TArgs) rather than a register list. Tail-calls the generic Call builtin
// and re-enters dispatch; the builtin writes the call result into the
// accumulator. Explicit instantiations for 0-3 arguments follow below.
template <class... TArgs>
void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
                                             TNode<Context> context,
                                             TNode<Word32T> arg_count,
                                             ConvertReceiverMode receiver_mode,
                                             TArgs... args) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
  Callable callable = CodeFactory::Call(isolate());
  TNode<Code> code_target = HeapConstant(callable.code());

  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The first argument parameter (the receiver) is implied to be undefined.
    TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
                                     context, function, arg_count, args...,
                                     UndefinedConstant());
  } else {
    TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
                                     context, function, arg_count, args...);
  }
  // TailCallStubThenDispatch updates accumulator with result.
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}

// Instantiate CallJSAndDispatch() for argument counts used by interpreter
// generator.
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>,
    TNode<Object>);
802 
// Like CallJSAndDispatch, but the final argument in |args| is a spread.
// Collects call feedback first, then pushes the arguments and tail-calls the
// CallWithSpread flavor of InterpreterPushArgsThenCall; the builtin writes
// the call result into the accumulator.
void InterpreterAssembler::CallJSWithSpreadAndDispatch(
    TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
  CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
  Comment("call using CallWithSpread builtin");
  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
      isolate(), ConvertReceiverMode::kAny,
      InterpreterPushArgsMode::kWithFinalSpread);
  TNode<Code> code_target = HeapConstant(callable.code());

  // The receiver is included in |args|, so it is not part of the count.
  TNode<Int32T> receiver_count = Int32Constant(1);
  TNode<Word32T> args_count = Int32Sub(args.reg_count(), receiver_count);
  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                   args_count, args.base_reg_location(),
                                   function);
  // TailCallStubThenDispatch updates accumulator with result.
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}
823 
// Implements the Construct bytecode: collects construct feedback (which
// routes either to the generic construct path or, for the Array constructor,
// to a path that threads through an AllocationSite), then pushes |args| and
// calls the matching InterpreterPushArgsThenConstruct builtin. Returns the
// construction result; this is a regular call, not a tail call.
TNode<Object> InterpreterAssembler::Construct(
    TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
    const RegListNodePair& args, TNode<UintPtrT> slot_id,
    TNode<HeapObject> maybe_feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  TVARIABLE(Object, var_result);
  TVARIABLE(AllocationSite, var_site);
  Label return_result(this), construct_generic(this),
      construct_array(this, &var_site);

  // Branches to construct_array (with var_site bound) when the target is the
  // Array function with usable feedback, else to construct_generic.
  CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
                           slot_id, &construct_generic, &construct_array,
                           &var_site);

  BIND(&construct_generic);
  {
    // TODO(bmeurer): Remove the generic type_info parameter from the Construct.
    Comment("call using Construct builtin");
    Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
        isolate(), InterpreterPushArgsMode::kOther);
    var_result =
        CallStub(callable, context, args.reg_count(), args.base_reg_location(),
                 target, new_target, UndefinedConstant());
    Goto(&return_result);
  }

  BIND(&construct_array);
  {
    // TODO(bmeurer): Introduce a dedicated builtin to deal with the Array
    // constructor feedback collection inside of Ignition.
    Comment("call using ConstructArray builtin");
    Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
        isolate(), InterpreterPushArgsMode::kArrayFunction);
    var_result =
        CallStub(callable, context, args.reg_count(), args.base_reg_location(),
                 target, new_target, var_site.value());
    Goto(&return_result);
  }

  BIND(&return_result);
  return var_result.value();
}
866 
// Implements ConstructWithSpread: updates construct feedback inline
// (uninitialized -> monomorphic weak {new_target} -> megamorphic, unwrapping
// bound functions and requiring the same native context), then pushes |args|
// (last argument is the spread) and calls the kWithFinalSpread flavor of
// InterpreterPushArgsThenConstruct. Returns the construction result.
TNode<Object> InterpreterAssembler::ConstructWithSpread(
    TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
    const RegListNodePair& args, TNode<UintPtrT> slot_id,
    TNode<HeapObject> maybe_feedback_vector) {
  // TODO(bmeurer): Unify this with the Construct bytecode feedback
  // above once we have a way to pass the AllocationSite to the Array
  // constructor _and_ spread the last argument at the same time.
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  Label extra_checks(this, Label::kDeferred), construct(this);
  // Without a feedback vector there is nothing to record; just construct.
  GotoIf(IsUndefined(maybe_feedback_vector), &construct);

  TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);

  // Increment the call count.
  IncrementCallCount(feedback_vector, slot_id);

  // Check if we have monomorphic {new_target} feedback already.
  TNode<MaybeObject> feedback =
      LoadFeedbackVectorSlot(feedback_vector, slot_id);
  Branch(IsWeakReferenceToObject(feedback, new_target), &construct,
         &extra_checks);

  BIND(&extra_checks);
  {
    Label check_initialized(this), initialize(this), mark_megamorphic(this);

    // Check if it is a megamorphic {new_target}.
    Comment("check if megamorphic");
    TNode<BoolT> is_megamorphic = TaggedEqual(
        feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
    GotoIf(is_megamorphic, &construct);

    Comment("check if weak reference");
    GotoIfNot(IsWeakOrCleared(feedback), &check_initialized);

    // If the weak reference is cleared, we have a new chance to become
    // monomorphic.
    Comment("check if weak reference is cleared");
    Branch(IsCleared(feedback), &initialize, &mark_megamorphic);

    BIND(&check_initialized);
    {
      // Check if it is uninitialized.
      Comment("check if uninitialized");
      TNode<BoolT> is_uninitialized =
          TaggedEqual(feedback, UninitializedSymbolConstant());
      Branch(is_uninitialized, &initialize, &mark_megamorphic);
    }

    BIND(&initialize);
    {
      Comment("check if function in same native context");
      GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
      // Check if the {new_target} is a JSFunction or JSBoundFunction
      // in the current native context.
      TVARIABLE(HeapObject, var_current, CAST(new_target));
      Label loop(this, &var_current), done_loop(this);
      Goto(&loop);
      BIND(&loop);
      {
        Label if_boundfunction(this), if_function(this);
        TNode<HeapObject> current = var_current.value();
        TNode<Uint16T> current_instance_type = LoadInstanceType(current);
        GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
               &if_boundfunction);
        Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
               &if_function, &mark_megamorphic);

        BIND(&if_function);
        {
          // Check that the JSFunction {current} is in the current native
          // context.
          TNode<Context> current_context =
              CAST(LoadObjectField(current, JSFunction::kContextOffset));
          TNode<NativeContext> current_native_context =
              LoadNativeContext(current_context);
          Branch(
              TaggedEqual(LoadNativeContext(context), current_native_context),
              &done_loop, &mark_megamorphic);
        }

        BIND(&if_boundfunction);
        {
          // Continue with the [[BoundTargetFunction]] of {current}.
          var_current = LoadObjectField<HeapObject>(
              current, JSBoundFunction::kBoundTargetFunctionOffset);
          Goto(&loop);
        }
      }
      BIND(&done_loop);
      // Record monomorphic feedback as a weak reference to {new_target}.
      StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
                                         CAST(new_target));
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:Initialize");
      Goto(&construct);
    }

    BIND(&mark_megamorphic);
    {
      // MegamorphicSentinel is an immortal immovable object so
      // write-barrier is not needed.
      Comment("transition to megamorphic");
      DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
      StoreFeedbackVectorSlot(
          feedback_vector, slot_id,
          HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:TransitionMegamorphic");
      Goto(&construct);
    }
  }

  BIND(&construct);
  Comment("call using ConstructWithSpread builtin");
  Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
      isolate(), InterpreterPushArgsMode::kWithFinalSpread);
  return CallStub(callable, context, args.reg_count(), args.base_reg_location(),
                  target, new_target, UndefinedConstant());
}
987 
// Calls the runtime function identified by |function_id| through the
// InterpreterCEntry stub: computes the Runtime::Function table entry address
// from the id, loads its C entry point, and calls it with the register-list
// |args|. |return_count| selects the 1- or 2-return-value CEntry variant;
// the explicit instantiations below cover both (T = Object and
// T = PairT<Object, Object>).
template <class T>
TNode<T> InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
                                            TNode<Context> context,
                                            const RegListNodePair& args,
                                            int return_count) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallRuntime(bytecode_));
  Callable callable = CodeFactory::InterpreterCEntry(isolate(), return_count);
  TNode<Code> code_target = HeapConstant(callable.code());

  // Get the function entry from the function id.
  TNode<RawPtrT> function_table = ReinterpretCast<RawPtrT>(ExternalConstant(
      ExternalReference::runtime_function_table_address(isolate())));
  TNode<Word32T> function_offset =
      Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
  TNode<WordT> function =
      IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
  TNode<RawPtrT> function_entry = Load<RawPtrT>(
      function, IntPtrConstant(offsetof(Runtime::Function, entry)));

  return CallStub<T>(callable.descriptor(), code_target, context,
                     args.reg_count(), args.base_reg_location(),
                     function_entry);
}

template V8_EXPORT_PRIVATE TNode<Object> InterpreterAssembler::CallRuntimeN(
    TNode<Uint32T> function_id, TNode<Context> context,
    const RegListNodePair& args, int return_count);
template V8_EXPORT_PRIVATE TNode<PairT<Object, Object>>
InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
                                   TNode<Context> context,
                                   const RegListNodePair& args,
                                   int return_count);
1021 
// Adjusts the function's interrupt budget (stored on its FeedbackCell) by
// |weight| plus the size of the current bytecode. For backward updates the
// budget is decremented and, if it drops below zero, the
// BytecodeBudgetInterruptFromBytecode runtime function is called; forward
// updates only increment, so no check is needed.
void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight,
                                                 bool backward) {
  Comment("[ UpdateInterruptBudget");

  // Assert that the weight is positive (negative weights should be implemented
  // as backward updates).
  CSA_ASSERT(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));

  Label load_budget_from_bytecode(this), load_budget_done(this);
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  TNode<FeedbackCell> feedback_cell =
      LoadObjectField<FeedbackCell>(function, JSFunction::kFeedbackCellOffset);
  TNode<Int32T> old_budget = LoadObjectField<Int32T>(
      feedback_cell, FeedbackCell::kInterruptBudgetOffset);

  // Make sure we include the current bytecode in the budget calculation.
  TNode<Int32T> budget_after_bytecode =
      Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize()));

  Label done(this);
  TVARIABLE(Int32T, new_budget);
  if (backward) {
    // Update budget by |weight| and check if it reaches zero.
    new_budget = Int32Sub(budget_after_bytecode, weight);
    TNode<BoolT> condition =
        Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
    Label ok(this), interrupt_check(this, Label::kDeferred);
    Branch(condition, &ok, &interrupt_check);

    BIND(&interrupt_check);
    // Budget exhausted: let the runtime handle the interrupt (the budget is
    // reset there, so we skip the store below by jumping straight to done).
    CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, GetContext(),
                function);
    Goto(&done);

    BIND(&ok);
  } else {
    // For a forward jump, we know we only increase the interrupt budget, so
    // no need to check if it's below zero.
    new_budget = Int32Add(budget_after_bytecode, weight);
  }

  // Update budget.
  StoreObjectFieldNoWriteBarrier(
      feedback_cell, FeedbackCell::kInterruptBudgetOffset, new_budget.value());
  Goto(&done);
  BIND(&done);
  Comment("] UpdateInterruptBudget");
}
1070 
Advance()1071 TNode<IntPtrT> InterpreterAssembler::Advance() {
1072   return Advance(CurrentBytecodeSize());
1073 }
1074 
Advance(int delta)1075 TNode<IntPtrT> InterpreterAssembler::Advance(int delta) {
1076   return Advance(IntPtrConstant(delta));
1077 }
1078 
Advance(TNode<IntPtrT> delta,bool backward)1079 TNode<IntPtrT> InterpreterAssembler::Advance(TNode<IntPtrT> delta,
1080                                              bool backward) {
1081 #ifdef V8_TRACE_IGNITION
1082   TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
1083 #endif
1084   TNode<IntPtrT> next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
1085                                         : IntPtrAdd(BytecodeOffset(), delta);
1086   bytecode_offset_ = next_offset;
1087   return next_offset;
1088 }
1089 
Jump(TNode<IntPtrT> jump_offset,bool backward)1090 void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset, bool backward) {
1091   DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
1092 
1093   UpdateInterruptBudget(TruncateIntPtrToInt32(jump_offset), backward);
1094   TNode<IntPtrT> new_bytecode_offset = Advance(jump_offset, backward);
1095   TNode<RawPtrT> target_bytecode =
1096       UncheckedCast<RawPtrT>(LoadBytecode(new_bytecode_offset));
1097   DispatchToBytecode(target_bytecode, new_bytecode_offset);
1098 }
1099 
Jump(TNode<IntPtrT> jump_offset)1100 void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset) {
1101   Jump(jump_offset, false);
1102 }
1103 
JumpBackward(TNode<IntPtrT> jump_offset)1104 void InterpreterAssembler::JumpBackward(TNode<IntPtrT> jump_offset) {
1105   Jump(jump_offset, true);
1106 }
1107 
JumpConditional(TNode<BoolT> condition,TNode<IntPtrT> jump_offset)1108 void InterpreterAssembler::JumpConditional(TNode<BoolT> condition,
1109                                            TNode<IntPtrT> jump_offset) {
1110   Label match(this), no_match(this);
1111 
1112   Branch(condition, &match, &no_match);
1113   BIND(&match);
1114   Jump(jump_offset);
1115   BIND(&no_match);
1116   Dispatch();
1117 }
1118 
JumpIfTaggedEqual(TNode<Object> lhs,TNode<Object> rhs,TNode<IntPtrT> jump_offset)1119 void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs,
1120                                              TNode<Object> rhs,
1121                                              TNode<IntPtrT> jump_offset) {
1122   JumpConditional(TaggedEqual(lhs, rhs), jump_offset);
1123 }
1124 
JumpIfTaggedNotEqual(TNode<Object> lhs,TNode<Object> rhs,TNode<IntPtrT> jump_offset)1125 void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs,
1126                                                 TNode<Object> rhs,
1127                                                 TNode<IntPtrT> jump_offset) {
1128   JumpConditional(TaggedNotEqual(lhs, rhs), jump_offset);
1129 }
1130 
LoadBytecode(TNode<IntPtrT> bytecode_offset)1131 TNode<WordT> InterpreterAssembler::LoadBytecode(
1132     TNode<IntPtrT> bytecode_offset) {
1133   TNode<Uint8T> bytecode =
1134       Load<Uint8T>(BytecodeArrayTaggedPointer(), bytecode_offset);
1135   return ChangeUint32ToWord(bytecode);
1136 }
1137 
// If |target_bytecode| is Star, executes the Star inline (see InlineStar) and
// returns the bytecode that follows it; otherwise returns |target_bytecode|
// unchanged. This lets handlers that are frequently followed by Star skip a
// full dispatch round-trip.
TNode<WordT> InterpreterAssembler::StarDispatchLookahead(
    TNode<WordT> target_bytecode) {
  Label do_inline_star(this), done(this);

  TVARIABLE(WordT, var_bytecode, target_bytecode);

  TNode<Int32T> star_bytecode =
      Int32Constant(static_cast<int>(Bytecode::kStar));
  TNode<BoolT> is_star =
      Word32Equal(TruncateWordToInt32(target_bytecode), star_bytecode);
  Branch(is_star, &do_inline_star, &done);

  BIND(&do_inline_star);
  {
    // InlineStar advances past the Star bytecode, so the reload below fetches
    // the bytecode after it.
    InlineStar();
    var_bytecode = LoadBytecode(BytecodeOffset());
    Goto(&done);
  }
  BIND(&done);
  return var_bytecode.value();
}
1159 
// Emits the body of the Star bytecode inline (store the accumulator into the
// operand register) and advances past it. Temporarily switches the
// assembler's bytecode_/accumulator_use_ state to kStar so tracing and the
// accumulator-use DCHECK see the Star bytecode, then restores the previous
// state; the save/restore order here is deliberate.
void InterpreterAssembler::InlineStar() {
  Bytecode previous_bytecode = bytecode_;
  AccumulatorUse previous_acc_use = accumulator_use_;

  bytecode_ = Bytecode::kStar;
  accumulator_use_ = AccumulatorUse::kNone;

#ifdef V8_TRACE_IGNITION
  TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
#endif
  StoreRegister(GetAccumulator(),
                BytecodeOperandReg(0, LoadSensitivity::kSafe));

  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));

  Advance();
  bytecode_ = previous_bytecode;
  accumulator_use_ = previous_acc_use;
}
1179 
// Ends the current handler: advances past the current bytecode, loads the
// next one, optionally applies Star lookahead, and tail-calls the next
// handler via the dispatch table.
void InterpreterAssembler::Dispatch() {
  Comment("========= Dispatch");
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  TNode<IntPtrT> target_offset = Advance();
  TNode<WordT> target_bytecode = LoadBytecode(target_offset);

  if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
    // May inline a following Star and return the bytecode after it.
    target_bytecode = StarDispatchLookahead(target_bytecode);
  }
  // BytecodeOffset() (not target_offset) because lookahead may have advanced.
  DispatchToBytecode(target_bytecode, BytecodeOffset());
}
1191 
DispatchToBytecode(TNode<WordT> target_bytecode,TNode<IntPtrT> new_bytecode_offset)1192 void InterpreterAssembler::DispatchToBytecode(
1193     TNode<WordT> target_bytecode, TNode<IntPtrT> new_bytecode_offset) {
1194   if (FLAG_trace_ignition_dispatches) {
1195     TraceBytecodeDispatch(target_bytecode);
1196   }
1197 
1198   TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
1199       DispatchTablePointer(), TimesSystemPointerSize(target_bytecode));
1200 
1201   DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
1202 }
1203 
DispatchToBytecodeHandlerEntry(TNode<RawPtrT> handler_entry,TNode<IntPtrT> bytecode_offset)1204 void InterpreterAssembler::DispatchToBytecodeHandlerEntry(
1205     TNode<RawPtrT> handler_entry, TNode<IntPtrT> bytecode_offset) {
1206   // Propagate speculation poisoning.
1207   TNode<RawPtrT> poisoned_handler_entry =
1208       UncheckedCast<RawPtrT>(WordPoisonOnSpeculation(handler_entry));
1209   TailCallBytecodeDispatch(InterpreterDispatchDescriptor{},
1210                            poisoned_handler_entry, GetAccumulatorUnchecked(),
1211                            bytecode_offset, BytecodeArrayTaggedPointer(),
1212                            DispatchTablePointer());
1213 }
1214 
// Dispatches from a Wide/ExtraWide prefix bytecode: reads the bytecode that
// follows the prefix and dispatches to its scaled handler variant by offsetting
// into the second or third 256-entry region of the dispatch table.
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
  // Dispatching a wide bytecode requires treating the prefix
  // bytecode a base pointer into the dispatch table and dispatching
  // the bytecode that follows relative to this base.
  //
  //   Indices 0-255 correspond to bytecodes with operand_scale == 0
  //   Indices 256-511 correspond to bytecodes with operand_scale == 1
  //   Indices 512-767 correspond to bytecodes with operand_scale == 2
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  // The prefix itself is one byte; step over it to the real bytecode.
  TNode<IntPtrT> next_bytecode_offset = Advance(1);
  TNode<WordT> next_bytecode = LoadBytecode(next_bytecode_offset);

  if (FLAG_trace_ignition_dispatches) {
    TraceBytecodeDispatch(next_bytecode);
  }

  TNode<IntPtrT> base_index;
  switch (operand_scale) {
    case OperandScale::kDouble:
      base_index = IntPtrConstant(1 << kBitsPerByte);
      break;
    case OperandScale::kQuadruple:
      base_index = IntPtrConstant(2 << kBitsPerByte);
      break;
    default:
      UNREACHABLE();
  }
  TNode<WordT> target_index = IntPtrAdd(base_index, next_bytecode);
  TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
      DispatchTablePointer(), TimesSystemPointerSize(target_index));

  DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}
1248 
// Charges the interrupt budget on function return by simulating a backedge
// from the return bytecode to the start of the function (backward update of
// weight = current offset minus the first bytecode's offset).
void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
  // TODO(rmcilroy): Investigate whether it is worth supporting self
  // optimization of primitive functions like FullCodegen.

  // Update profiling count by the number of bytes between the end of the
  // current bytecode and the start of the first one, to simulate backedge to
  // start of function.
  //
  // With headers and current offset, the bytecode array layout looks like:
  //
  //           <---------- simulated backedge ----------
  // | header | first bytecode | .... | return bytecode |
  //  |<------ current offset ------->
  //  ^ tagged bytecode array pointer
  //
  // UpdateInterruptBudget already handles adding the bytecode size to the
  // length of the back-edge, so we just have to correct for the non-zero offset
  // of the first bytecode.

  const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
  TNode<Int32T> profiling_weight =
      Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
               Int32Constant(kFirstBytecodeOffset));
  // backward = true: decrement the budget and trigger the interrupt runtime
  // call if it is exhausted.
  UpdateInterruptBudget(profiling_weight, true);
}
1274 
LoadOsrNestingLevel()1275 TNode<Int8T> InterpreterAssembler::LoadOsrNestingLevel() {
1276   return LoadObjectField<Int8T>(BytecodeArrayTaggedPointer(),
1277                                 BytecodeArray::kOsrNestingLevelOffset);
1278 }
1279 
Abort(AbortReason abort_reason)1280 void InterpreterAssembler::Abort(AbortReason abort_reason) {
1281   TNode<Smi> abort_id = SmiConstant(abort_reason);
1282   CallRuntime(Runtime::kAbort, GetContext(), abort_id);
1283 }
1284 
AbortIfWordNotEqual(TNode<WordT> lhs,TNode<WordT> rhs,AbortReason abort_reason)1285 void InterpreterAssembler::AbortIfWordNotEqual(TNode<WordT> lhs,
1286                                                TNode<WordT> rhs,
1287                                                AbortReason abort_reason) {
1288   Label ok(this), abort(this, Label::kDeferred);
1289   Branch(WordEqual(lhs, rhs), &ok, &abort);
1290 
1291   BIND(&abort);
1292   Abort(abort_reason);
1293   Goto(&ok);
1294 
1295   BIND(&ok);
1296 }
1297 
// Checks the debugger's restart-frame-pointer slot; if it is set (non-null),
// calls the FrameDropperTrampoline, which tears down the stack up to that
// frame and restarts the target function. The trampoline call is not expected
// to return, hence the Abort after it.
void InterpreterAssembler::MaybeDropFrames(TNode<Context> context) {
  TNode<ExternalReference> restart_fp_address =
      ExternalConstant(ExternalReference::debug_restart_fp_address(isolate()));

  TNode<IntPtrT> restart_fp = Load<IntPtrT>(restart_fp_address);
  TNode<IntPtrT> null = IntPtrConstant(0);

  Label ok(this), drop_frames(this);
  Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames);

  BIND(&drop_frames);
  // We don't expect this call to return since the frame dropper tears down
  // the stack and jumps into the function on the target frame to restart it.
  CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
  Abort(AbortReason::kUnexpectedReturnFromFrameDropper);
  Goto(&ok);

  BIND(&ok);
}
1317 
TraceBytecode(Runtime::FunctionId function_id)1318 void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
1319   CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
1320               SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
1321 }
1322 
// Increments the (current bytecode -> |target_bytecode|) pair counter in the
// external dispatch-counters table, saturating at the maximum uintptr_t value
// to avoid wrap-around. Used only under --trace-ignition-dispatches.
void InterpreterAssembler::TraceBytecodeDispatch(TNode<WordT> target_bytecode) {
  TNode<ExternalReference> counters_table = ExternalConstant(
      ExternalReference::interpreter_dispatch_counters(isolate()));
  // The table is a flat 2-D array: row = source bytecode, column = target.
  TNode<IntPtrT> source_bytecode_table_index = IntPtrConstant(
      static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));

  TNode<WordT> counter_offset = TimesSystemPointerSize(
      IntPtrAdd(source_bytecode_table_index, target_bytecode));
  TNode<IntPtrT> old_counter = Load<IntPtrT>(counters_table, counter_offset);

  Label counter_ok(this), counter_saturated(this, Label::kDeferred);

  TNode<BoolT> counter_reached_max = WordEqual(
      old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
  Branch(counter_reached_max, &counter_saturated, &counter_ok);

  BIND(&counter_ok);
  {
    TNode<IntPtrT> new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
    // Plain word store into off-heap memory; no write barrier needed.
    StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
                        counter_offset, new_counter);
    Goto(&counter_saturated);
  }

  BIND(&counter_saturated);
}
1349 
// static
// Whether the target architecture tolerates unaligned memory accesses; used
// by operand-reading code to decide between a single wide load and multiple
// byte loads.
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
  // MIPS requires naturally aligned loads and stores.
  return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC ||   \
    V8_TARGET_ARCH_PPC64
  return true;
#else
#error "Unknown Architecture"
#endif
}
1362 
AbortIfRegisterCountInvalid(TNode<FixedArrayBase> parameters_and_registers,TNode<IntPtrT> formal_parameter_count,TNode<UintPtrT> register_count)1363 void InterpreterAssembler::AbortIfRegisterCountInvalid(
1364     TNode<FixedArrayBase> parameters_and_registers,
1365     TNode<IntPtrT> formal_parameter_count, TNode<UintPtrT> register_count) {
1366   TNode<IntPtrT> array_size =
1367       LoadAndUntagFixedArrayBaseLength(parameters_and_registers);
1368 
1369   Label ok(this), abort(this, Label::kDeferred);
1370   Branch(UintPtrLessThanOrEqual(
1371              IntPtrAdd(formal_parameter_count, register_count), array_size),
1372          &ok, &abort);
1373 
1374   BIND(&abort);
1375   Abort(AbortReason::kInvalidParametersAndRegistersInGenerator);
1376   Goto(&ok);
1377 
1378   BIND(&ok);
1379 }
1380 
ExportParametersAndRegisterFile(TNode<FixedArray> array,const RegListNodePair & registers,TNode<Int32T> formal_parameter_count)1381 TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
1382     TNode<FixedArray> array, const RegListNodePair& registers,
1383     TNode<Int32T> formal_parameter_count) {
1384   // Store the formal parameters (without receiver) followed by the
1385   // registers into the generator's internal parameters_and_registers field.
1386   TNode<IntPtrT> formal_parameter_count_intptr =
1387       Signed(ChangeUint32ToWord(formal_parameter_count));
1388   TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
1389   if (FLAG_debug_code) {
1390     CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
1391                                  RegisterLocation(Register(0))));
1392     AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
1393                                 register_count);
1394   }
1395 
1396   {
1397     TVARIABLE(IntPtrT, var_index);
1398     var_index = IntPtrConstant(0);
1399 
1400     // Iterate over parameters and write them into the array.
1401     Label loop(this, &var_index), done_loop(this);
1402 
1403     TNode<IntPtrT> reg_base =
1404         IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() + 1);
1405 
1406     Goto(&loop);
1407     BIND(&loop);
1408     {
1409       TNode<IntPtrT> index = var_index.value();
1410       GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
1411                 &done_loop);
1412 
1413       TNode<IntPtrT> reg_index = IntPtrAdd(reg_base, index);
1414       TNode<Object> value = LoadRegister(reg_index);
1415 
1416       StoreFixedArrayElement(array, index, value);
1417 
1418       var_index = IntPtrAdd(index, IntPtrConstant(1));
1419       Goto(&loop);
1420     }
1421     BIND(&done_loop);
1422   }
1423 
1424   {
1425     // Iterate over register file and write values into array.
1426     // The mapping of register to array index must match that used in
1427     // BytecodeGraphBuilder::VisitResumeGenerator.
1428     TVARIABLE(IntPtrT, var_index);
1429     var_index = IntPtrConstant(0);
1430 
1431     Label loop(this, &var_index), done_loop(this);
1432     Goto(&loop);
1433     BIND(&loop);
1434     {
1435       TNode<IntPtrT> index = var_index.value();
1436       GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
1437 
1438       TNode<IntPtrT> reg_index =
1439           IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
1440       TNode<Object> value = LoadRegister(reg_index);
1441 
1442       TNode<IntPtrT> array_index =
1443           IntPtrAdd(formal_parameter_count_intptr, index);
1444       StoreFixedArrayElement(array, array_index, value);
1445 
1446       var_index = IntPtrAdd(index, IntPtrConstant(1));
1447       Goto(&loop);
1448     }
1449     BIND(&done_loop);
1450   }
1451 
1452   return array;
1453 }
1454 
ImportRegisterFile(TNode<FixedArray> array,const RegListNodePair & registers,TNode<Int32T> formal_parameter_count)1455 TNode<FixedArray> InterpreterAssembler::ImportRegisterFile(
1456     TNode<FixedArray> array, const RegListNodePair& registers,
1457     TNode<Int32T> formal_parameter_count) {
1458   TNode<IntPtrT> formal_parameter_count_intptr =
1459       Signed(ChangeUint32ToWord(formal_parameter_count));
1460   TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
1461   if (FLAG_debug_code) {
1462     CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
1463                                  RegisterLocation(Register(0))));
1464     AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
1465                                 register_count);
1466   }
1467 
1468   TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
1469 
1470   // Iterate over array and write values into register file.  Also erase the
1471   // array contents to not keep them alive artificially.
1472   Label loop(this, &var_index), done_loop(this);
1473   Goto(&loop);
1474   BIND(&loop);
1475   {
1476     TNode<IntPtrT> index = var_index.value();
1477     GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
1478 
1479     TNode<IntPtrT> array_index =
1480         IntPtrAdd(formal_parameter_count_intptr, index);
1481     TNode<Object> value = LoadFixedArrayElement(array, array_index);
1482 
1483     TNode<IntPtrT> reg_index =
1484         IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
1485     StoreRegister(value, reg_index);
1486 
1487     StoreFixedArrayElement(array, array_index, StaleRegisterConstant());
1488 
1489     var_index = IntPtrAdd(index, IntPtrConstant(1));
1490     Goto(&loop);
1491   }
1492   BIND(&done_loop);
1493 
1494   return array;
1495 }
1496 
// Returns the total size in bytes of the bytecode currently being handled,
// including its operands at the current operand scale.
int InterpreterAssembler::CurrentBytecodeSize() const {
  return Bytecodes::Size(bytecode_, operand_scale_);
}
1500 
ToNumberOrNumeric(Object::Conversion mode)1501 void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
1502   TNode<Object> object = GetAccumulator();
1503   TNode<Context> context = GetContext();
1504 
1505   TVARIABLE(Smi, var_type_feedback);
1506   TVARIABLE(Numeric, var_result);
1507   Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this),
1508       if_objectisother(this, Label::kDeferred);
1509 
1510   GotoIf(TaggedIsSmi(object), &if_objectissmi);
1511   Branch(IsHeapNumber(CAST(object)), &if_objectisheapnumber, &if_objectisother);
1512 
1513   BIND(&if_objectissmi);
1514   {
1515     var_result = CAST(object);
1516     var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
1517     Goto(&if_done);
1518   }
1519 
1520   BIND(&if_objectisheapnumber);
1521   {
1522     var_result = CAST(object);
1523     var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
1524     Goto(&if_done);
1525   }
1526 
1527   BIND(&if_objectisother);
1528   {
1529     auto builtin = Builtins::kNonNumberToNumber;
1530     if (mode == Object::Conversion::kToNumeric) {
1531       builtin = Builtins::kNonNumberToNumeric;
1532       // Special case for collecting BigInt feedback.
1533       Label not_bigint(this);
1534       GotoIfNot(IsBigInt(CAST(object)), &not_bigint);
1535       {
1536         var_result = CAST(object);
1537         var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
1538         Goto(&if_done);
1539       }
1540       BIND(&not_bigint);
1541     }
1542 
1543     // Convert {object} by calling out to the appropriate builtin.
1544     var_result = CAST(CallBuiltin(builtin, context, object));
1545     var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
1546     Goto(&if_done);
1547   }
1548 
1549   BIND(&if_done);
1550 
1551   // Record the type feedback collected for {object}.
1552   TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
1553   TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
1554 
1555   UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index);
1556 
1557   SetAccumulator(var_result.value());
1558   Dispatch();
1559 }
1560 
1561 }  // namespace interpreter
1562 }  // namespace internal
1563 }  // namespace v8
1564