// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/interpreter-assembler.h"

#include <limits>
#include <ostream>

#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/execution/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/objects-inl.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {
namespace interpreter {

using compiler::CodeAssemblerState;

InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
                                           Bytecode bytecode,
                                           OperandScale operand_scale)
    : CodeStubAssembler(state),
      bytecode_(bytecode),
      operand_scale_(operand_scale),
      TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_),
      TVARIABLE_CONSTRUCTOR(bytecode_array_,
                            Parameter<BytecodeArray>(
                                InterpreterDispatchDescriptor::kBytecodeArray)),
      TVARIABLE_CONSTRUCTOR(
          bytecode_offset_,
          UncheckedParameter<IntPtrT>(
              InterpreterDispatchDescriptor::kBytecodeOffset)),
      TVARIABLE_CONSTRUCTOR(dispatch_table_,
                            UncheckedParameter<ExternalReference>(
                                InterpreterDispatchDescriptor::kDispatchTable)),
      TVARIABLE_CONSTRUCTOR(
          accumulator_,
          Parameter<Object>(InterpreterDispatchDescriptor::kAccumulator)),
      implicit_register_use_(ImplicitRegisterUse::kNone),
      made_call_(false),
      reloaded_frame_ptr_(false),
      bytecode_array_valid_(true) {
#ifdef V8_TRACE_UNOPTIMIZED
  TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif
  RegisterCallGenerationCallbacks([this] { CallPrologue(); },
                                  [this] { CallEpilogue(); });

  // Save the bytecode offset immediately if the bytecode will make a call
  // along the critical path, or if it is a return bytecode.
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
      Bytecodes::Returns(bytecode)) {
    SaveBytecodeOffset();
  }
}

InterpreterAssembler::~InterpreterAssembler() {
  // If the following check fails, the handler does not use the
  // accumulator in the way described in the bytecode definitions in
  // bytecodes.h.
  DCHECK_EQ(implicit_register_use_,
            Bytecodes::GetImplicitRegisterUse(bytecode_));
  UnregisterCallGenerationCallbacks();
}

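// Returns the interpreted frame pointer, loading it lazily from the parent
// frame pointer on first use. If this bytecode makes a call along the
// critical path, the cached value is re-loaded once after that call, since
// it cannot be assumed to survive the call.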
TNode<RawPtrT> InterpreterAssembler::GetInterpretedFramePointer() {
  if (!interpreted_frame_pointer_.IsBound()) {
    interpreted_frame_pointer_ = LoadParentFramePointer();
  } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
             !reloaded_frame_ptr_) {
    interpreted_frame_pointer_ = LoadParentFramePointer();
    reloaded_frame_ptr_ = true;
  }
  return interpreted_frame_pointer_.value();
}

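// Returns the current bytecode offset. If a call was made along the critical
// path and the offset TVariable still holds the raw dispatch parameter (i.e.
// it was never updated), the offset saved in the interpreter frame is
// re-loaded instead.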
TNode<IntPtrT> InterpreterAssembler::BytecodeOffset() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (bytecode_offset_.value() ==
       UncheckedParameter<IntPtrT>(
           InterpreterDispatchDescriptor::kBytecodeOffset))) {
    bytecode_offset_ = ReloadBytecodeOffset();
  }
  return bytecode_offset_.value();
}

TNode<IntPtrT> InterpreterAssembler::ReloadBytecodeOffset() {
  TNode<IntPtrT> offset = LoadAndUntagRegister(Register::bytecode_offset());
  if (operand_scale() != OperandScale::kSingle) {
    // Add one to the offset such that it points to the actual bytecode rather
    // than the Wide / ExtraWide prefix bytecode.
    offset = IntPtrAdd(offset, IntPtrConstant(1));
  }
  return offset;
}

void InterpreterAssembler::SaveBytecodeOffset() {
  TNode<IntPtrT> bytecode_offset = BytecodeOffset();
  if (operand_scale() != OperandScale::kSingle) {
    // Subtract one from the bytecode_offset such that it points to the Wide /
    // ExtraWide prefix bytecode.
    bytecode_offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
  }
  int store_offset =
      Register::bytecode_offset().ToOperand() * kSystemPointerSize;
  TNode<RawPtrT> base = GetInterpretedFramePointer();

  if (SmiValuesAre32Bits()) {
    int zero_offset = store_offset + 4;
    int payload_offset = store_offset;
#if V8_TARGET_LITTLE_ENDIAN
    std::swap(zero_offset, payload_offset);
#endif
    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
                        IntPtrConstant(zero_offset), Int32Constant(0));
    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
                        IntPtrConstant(payload_offset),
                        TruncateIntPtrToInt32(bytecode_offset));
  } else {
    StoreFullTaggedNoWriteBarrier(base, IntPtrConstant(store_offset),
                                  SmiTag(bytecode_offset));
  }
}

TNode<BytecodeArray> InterpreterAssembler::BytecodeArrayTaggedPointer() {
  // Force a re-load of the bytecode array after every call in case the
  // debugger has been activated.
  if (!bytecode_array_valid_) {
    bytecode_array_ = CAST(LoadRegister(Register::bytecode_array()));
    bytecode_array_valid_ = true;
  }
  return bytecode_array_.value();
}

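// Same re-load-after-call pattern as BytecodeOffset(): if the dispatch table
// TVariable still holds the raw dispatch parameter after a call, it is
// re-materialized as an external constant.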
TNode<ExternalReference> InterpreterAssembler::DispatchTablePointer() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (dispatch_table_.value() ==
       UncheckedParameter<ExternalReference>(
           InterpreterDispatchDescriptor::kDispatchTable))) {
    dispatch_table_ = ExternalConstant(
        ExternalReference::interpreter_dispatch_table_address(isolate()));
  }
  return dispatch_table_.value();
}

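// Accumulator access that deliberately skips the implicit-register-use
// bookkeeping; used internally (e.g. by tracing and dispatch) where the
// access should not count as the bytecode reading the accumulator.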
TNode<Object> InterpreterAssembler::GetAccumulatorUnchecked() {
  return accumulator_.value();
}

TNode<Object> InterpreterAssembler::GetAccumulator() {
  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kReadAccumulator;
  return GetAccumulatorUnchecked();
}

void InterpreterAssembler::SetAccumulator(TNode<Object> value) {
  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
  accumulator_ = value;
}

TNode<Context> InterpreterAssembler::GetContext() {
  return CAST(LoadRegister(Register::current_context()));
}

void InterpreterAssembler::SetContext(TNode<Context> value) {
  StoreRegister(value, Register::current_context());
}

TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
                                                       TNode<Uint32T> depth) {
  TVARIABLE(Context, cur_context, context);
  TVARIABLE(Uint32T, cur_depth, depth);

  Label context_found(this);

  Label context_search(this, {&cur_depth, &cur_context});

  // Fast path if the depth is 0.
  Branch(Word32Equal(depth, Int32Constant(0)), &context_found,
         &context_search);

  // Loop until the depth is 0.
  BIND(&context_search);
  {
    cur_depth = Unsigned(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context =
        CAST(LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
           &context_search);
  }

  BIND(&context_found);
  return cur_context.value();
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocation(
    TNode<IntPtrT> reg_index) {
  return Signed(
      IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)));
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Register reg) {
  return RegisterLocation(IntPtrConstant(reg.ToOperand()));
}

TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(TNode<IntPtrT> index) {
  return TimesSystemPointerSize(index);
}

TNode<Object> InterpreterAssembler::LoadRegister(TNode<IntPtrT> reg_index) {
  return LoadFullTagged(GetInterpretedFramePointer(),
                        RegisterFrameOffset(reg_index));
}

TNode<Object> InterpreterAssembler::LoadRegister(Register reg) {
  return LoadFullTagged(GetInterpretedFramePointer(),
                        IntPtrConstant(reg.ToOperand() * kSystemPointerSize));
}

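// When Smis are 32 bits wide, the Smi payload occupies only half of the
// tagged register slot, so just the payload word is loaded (offset by 4 on
// little-endian targets).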
TNode<IntPtrT> InterpreterAssembler::LoadAndUntagRegister(Register reg) {
  TNode<RawPtrT> base = GetInterpretedFramePointer();
  int index = reg.ToOperand() * kSystemPointerSize;
  if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
    index += 4;
#endif
    return ChangeInt32ToIntPtr(Load<Int32T>(base, IntPtrConstant(index)));
  } else {
    return SmiToIntPtr(CAST(LoadFullTagged(base, IntPtrConstant(index))));
  }
}

TNode<Object> InterpreterAssembler::LoadRegisterAtOperandIndex(
    int operand_index) {
  return LoadRegister(BytecodeOperandReg(operand_index));
}

std::pair<TNode<Object>, TNode<Object>>
InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) {
  DCHECK_EQ(OperandType::kRegPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  return std::make_pair(LoadRegister(first_reg_index),
                        LoadRegister(second_reg_index));
}

InterpreterAssembler::RegListNodePair
InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
  DCHECK(Bytecodes::IsRegisterListOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index + 1));
  TNode<IntPtrT> base_reg =
      RegisterLocation(BytecodeOperandReg(operand_index));
  TNode<Uint32T> reg_count = BytecodeOperandCount(operand_index + 1);
  return RegListNodePair(base_reg, reg_count);
}

TNode<Object> InterpreterAssembler::LoadRegisterFromRegisterList(
    const RegListNodePair& reg_list, int index) {
  TNode<IntPtrT> location = RegisterLocationInRegisterList(reg_list, index);
  return LoadFullTagged(location);
}

TNode<IntPtrT> InterpreterAssembler::RegisterLocationInRegisterList(
    const RegListNodePair& reg_list, int index) {
  CSA_DCHECK(this,
             Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
  TNode<IntPtrT> offset = RegisterFrameOffset(IntPtrConstant(index));
  // Register indexes are negative, so subtract index from base location to get
  // location.
  return Signed(IntPtrSub(reg_list.base_reg_location(), offset));
}

void InterpreterAssembler::StoreRegister(TNode<Object> value, Register reg) {
  StoreFullTaggedNoWriteBarrier(
      GetInterpretedFramePointer(),
      IntPtrConstant(reg.ToOperand() * kSystemPointerSize), value);
}

void InterpreterAssembler::StoreRegister(TNode<Object> value,
                                         TNode<IntPtrT> reg_index) {
  StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(),
                                RegisterFrameOffset(reg_index), value);
}

void InterpreterAssembler::StoreRegisterForShortStar(TNode<Object> value,
                                                     TNode<WordT> opcode) {
  DCHECK(Bytecodes::IsShortStar(bytecode_));
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteShortStar;

  CSA_DCHECK(
      this, UintPtrGreaterThanOrEqual(opcode, UintPtrConstant(static_cast<int>(
                                                  Bytecode::kFirstShortStar))));
  CSA_DCHECK(
      this,
      UintPtrLessThanOrEqual(
          opcode, UintPtrConstant(static_cast<int>(Bytecode::kLastShortStar))));

  // Compute the constant that we can add to a Bytecode value to map the range
  // [Bytecode::kStar15, Bytecode::kStar0] to the range
  // [Register(15).ToOperand(), Register(0).ToOperand()].
  constexpr int short_star_to_operand =
      Register(0).ToOperand() - static_cast<int>(Bytecode::kStar0);
  // Make sure the values count in the right direction.
  STATIC_ASSERT(short_star_to_operand ==
                Register(1).ToOperand() - static_cast<int>(Bytecode::kStar1));

  TNode<IntPtrT> offset =
      IntPtrAdd(RegisterFrameOffset(Signed(opcode)),
                IntPtrConstant(short_star_to_operand * kSystemPointerSize));
  StoreFullTaggedNoWriteBarrier(GetInterpretedFramePointer(), offset, value);
}

void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode<Object> value,
                                                       int operand_index) {
  StoreRegister(value, BytecodeOperandReg(operand_index));
}

void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1,
                                                           TNode<Object> value2,
                                                           int operand_index) {
  DCHECK_EQ(OperandType::kRegOutPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
  StoreRegister(value1, first_reg_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
}

void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
    TNode<Object> value1, TNode<Object> value2, TNode<Object> value3,
    int operand_index) {
  DCHECK_EQ(OperandType::kRegOutTriple,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
  StoreRegister(value1, first_reg_index);
  TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
  TNode<IntPtrT> third_reg_index = NextRegister(second_reg_index);
  StoreRegister(value3, third_reg_index);
}

TNode<IntPtrT> InterpreterAssembler::NextRegister(TNode<IntPtrT> reg_index) {
  // Register indexes are negative, so the next index is minus one.
  return Signed(IntPtrAdd(reg_index, IntPtrConstant(-1)));
}

TNode<IntPtrT> InterpreterAssembler::OperandOffset(int operand_index) {
  return IntPtrConstant(
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}

TNode<Uint8T> InterpreterAssembler::BytecodeOperandUnsignedByte(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
  return Load<Uint8T>(BytecodeArrayTaggedPointer(),
                      IntPtrAdd(BytecodeOffset(), operand_offset));
}

TNode<Int8T> InterpreterAssembler::BytecodeOperandSignedByte(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
  return Load<Int8T>(BytecodeArrayTaggedPointer(),
                     IntPtrAdd(BytecodeOffset(), operand_offset));
}

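// Assembles a 16- or 32-bit operand byte by byte, for targets without
// unaligned load support. The most significant byte is loaded with the
// requested signedness; the remaining bytes are loaded as unsigned and
// OR-ed together.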
TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
    int relative_offset, MachineType result_type) {
  static const int kMaxCount = 4;
  DCHECK(!TargetSupportsUnalignedAccess());

  int count;
  switch (result_type.representation()) {
    case MachineRepresentation::kWord16:
      count = 2;
      break;
    case MachineRepresentation::kWord32:
      count = 4;
      break;
    default:
      UNREACHABLE();
  }
  MachineType msb_type =
      result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();

#if V8_TARGET_LITTLE_ENDIAN
  const int kStep = -1;
  int msb_offset = count - 1;
#elif V8_TARGET_BIG_ENDIAN
  const int kStep = 1;
  int msb_offset = 0;
#else
#error "Unknown Architecture"
#endif

  // Read the most significant byte into bytes[0] and then in order
  // down to the least significant in bytes[count - 1].
  DCHECK_LE(count, kMaxCount);
  TNode<Word32T> bytes[kMaxCount];
  for (int i = 0; i < count; i++) {
    MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
    TNode<IntPtrT> offset =
        IntPtrConstant(relative_offset + msb_offset + i * kStep);
    TNode<IntPtrT> array_offset = IntPtrAdd(BytecodeOffset(), offset);
    bytes[i] = UncheckedCast<Word32T>(
        Load(machine_type, BytecodeArrayTaggedPointer(), array_offset));
  }

  // Pack LSB to MSB.
  TNode<Word32T> result = bytes[--count];
  for (int i = 1; --count >= 0; i++) {
    TNode<Int32T> shift = Int32Constant(i * kBitsPerByte);
    TNode<Word32T> value = Word32Shl(bytes[count], shift);
    result = Word32Or(value, result);
  }
  return result;
}

TNode<Uint16T> InterpreterAssembler::BytecodeOperandUnsignedShort(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Uint16T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return UncheckedCast<Uint16T>(
        BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16()));
  }
}

TNode<Int16T> InterpreterAssembler::BytecodeOperandSignedShort(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Int16T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return UncheckedCast<Int16T>(
        BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16()));
  }
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandUnsignedQuad(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Uint32T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return UncheckedCast<Uint32T>(
        BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32()));
  }
}

TNode<Int32T> InterpreterAssembler::BytecodeOperandSignedQuad(
    int operand_index) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load<Int32T>(
        BytecodeArrayTaggedPointer(),
        IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
  } else {
    return UncheckedCast<Int32T>(
        BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32()));
  }
}

TNode<Int32T> InterpreterAssembler::BytecodeSignedOperand(
    int operand_index, OperandSize operand_size) {
  DCHECK(!Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandSignedByte(operand_index);
    case OperandSize::kShort:
      return BytecodeOperandSignedShort(operand_index);
    case OperandSize::kQuad:
      return BytecodeOperandSignedQuad(operand_index);
    case OperandSize::kNone:
      UNREACHABLE();
  }
}

TNode<Uint32T> InterpreterAssembler::BytecodeUnsignedOperand(
    int operand_index, OperandSize operand_size) {
  DCHECK(Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandUnsignedByte(operand_index);
    case OperandSize::kShort:
      return BytecodeOperandUnsignedShort(operand_index);
    case OperandSize::kQuad:
      return BytecodeOperandUnsignedQuad(operand_index);
    case OperandSize::kNone:
      UNREACHABLE();
  }
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandCount(int operand_index) {
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
  DCHECK_EQ(OperandType::kFlag8,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
  DCHECK_EQ(OperandType::kUImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandUImmWord(
    int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
  return SmiFromUint32(BytecodeOperandUImm(operand_index));
}

TNode<Int32T> InterpreterAssembler::BytecodeOperandImm(int operand_index) {
  DCHECK_EQ(OperandType::kImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeSignedOperand(operand_index, operand_size);
}

TNode<IntPtrT> InterpreterAssembler::BytecodeOperandImmIntPtr(
    int operand_index) {
  return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
  return SmiFromInt32(BytecodeOperandImm(operand_index));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandIdxInt32(
    int operand_index) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index));
}

TNode<Smi> InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
  return SmiTag(Signed(BytecodeOperandIdx(operand_index)));
}

TNode<TaggedIndex> InterpreterAssembler::BytecodeOperandIdxTaggedIndex(
    int operand_index) {
  TNode<IntPtrT> index =
      ChangeInt32ToIntPtr(Signed(BytecodeOperandIdxInt32(operand_index)));
  return IntPtrToTaggedIndex(index);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx(
    int operand_index) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size));
}

TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg(int operand_index) {
  DCHECK(Bytecodes::IsRegisterOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeInt32ToIntPtr(
      BytecodeSignedOperand(operand_index, operand_size));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandRuntimeId(
    int operand_index) {
  DCHECK_EQ(OperandType::kRuntimeId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kShort);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<UintPtrT> InterpreterAssembler::BytecodeOperandNativeContextIndex(
    int operand_index) {
  DCHECK_EQ(OperandType::kNativeContextIndex,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size));
}

TNode<Uint32T> InterpreterAssembler::BytecodeOperandIntrinsicId(
    int operand_index) {
  DCHECK_EQ(OperandType::kIntrinsicId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

TNode<Object> InterpreterAssembler::LoadConstantPoolEntry(TNode<WordT> index) {
  TNode<FixedArray> constant_pool = CAST(LoadObjectField(
      BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
  return UnsafeLoadFixedArrayElement(constant_pool,
                                     UncheckedCast<IntPtrT>(index), 0);
}

TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
    TNode<WordT> index) {
  return SmiUntag(CAST(LoadConstantPoolEntry(index)));
}

TNode<Object> InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
    int operand_index) {
  TNode<UintPtrT> index = BytecodeOperandConstantPoolIdx(operand_index);
  return LoadConstantPoolEntry(index);
}

TNode<IntPtrT>
InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
    int operand_index) {
  return SmiUntag(CAST(LoadConstantPoolEntryAtOperandIndex(operand_index)));
}

TNode<HeapObject> InterpreterAssembler::LoadFeedbackVector() {
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  return CodeStubAssembler::LoadFeedbackVector(function);
}

void InterpreterAssembler::CallPrologue() {
  if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) {
    // Bytecodes that make a call along the critical path save the bytecode
    // offset in the bytecode handler's prologue. For other bytecodes, if
    // there are multiple calls in the bytecode handler, you need to spill
    // before each of them, unless SaveBytecodeOffset has explicitly been
    // called in a path that dominates _all_ of those calls (which we don't
    // track).
    SaveBytecodeOffset();
  }

  bytecode_array_valid_ = false;
  made_call_ = true;
}

void InterpreterAssembler::CallEpilogue() {}

void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
    ConvertReceiverMode receiver_mode) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);

  TNode<Word32T> args_count = args.reg_count();
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // Add receiver. It is not included in args as it is implicit.
    args_count = Int32Add(args_count, Int32Constant(kJSArgcReceiverSlots));
  }

  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
      isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
  TNode<CodeT> code_target = HeapConstant(callable.code());

  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                   args_count, args.base_reg_location(),
                                   function);
  // TailCallStubThenDispatch updates accumulator with result.
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}

template <class... TArgs>
void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
                                             TNode<Context> context,
                                             TNode<Word32T> arg_count,
                                             ConvertReceiverMode receiver_mode,
                                             TArgs... args) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
  Callable callable = CodeFactory::Call(isolate());
  TNode<CodeT> code_target = HeapConstant(callable.code());

  arg_count = JSParameterCount(arg_count);
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The first argument (the receiver) is implied to be undefined.
    TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
                                     context, function, arg_count, args...,
                                     UndefinedConstant());
  } else {
    TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
                                     context, function, arg_count, args...);
  }
  // TailCallStubThenDispatch updates accumulator with result.
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}

// Instantiate CallJSAndDispatch() for argument counts used by interpreter
// generator.
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    TNode<Object> function, TNode<Context> context, TNode<Word32T> arg_count,
    ConvertReceiverMode receiver_mode, TNode<Object>, TNode<Object>,
    TNode<Object>);

void InterpreterAssembler::CallJSWithSpreadAndDispatch(
    TNode<Object> function, TNode<Context> context, const RegListNodePair& args,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
  LazyNode<Object> receiver = [=] { return LoadRegisterAtOperandIndex(1); };
  CollectCallFeedback(function, receiver, context, maybe_feedback_vector,
                      slot_id);
  Comment("call using CallWithSpread builtin");
  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
      isolate(), ConvertReceiverMode::kAny,
      InterpreterPushArgsMode::kWithFinalSpread);
  TNode<CodeT> code_target = HeapConstant(callable.code());

  TNode<Word32T> args_count = args.reg_count();
  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                   args_count, args.base_reg_location(),
                                   function);
  // TailCallStubThenDispatch updates accumulator with result.
  implicit_register_use_ =
      implicit_register_use_ | ImplicitRegisterUse::kWriteAccumulator;
}

TNode<Object> InterpreterAssembler::Construct(
    TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
    const RegListNodePair& args, TNode<UintPtrT> slot_id,
    TNode<HeapObject> maybe_feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  TVARIABLE(Object, var_result);
  TVARIABLE(AllocationSite, var_site);
  Label return_result(this), construct_generic(this),
      construct_array(this, &var_site);

  TNode<Word32T> args_count = JSParameterCount(args.reg_count());
  CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
                           slot_id, UpdateFeedbackMode::kOptionalFeedback,
                           &construct_generic, &construct_array, &var_site);

  BIND(&construct_generic);
  {
    // TODO(bmeurer): Remove the generic type_info parameter from the Construct.
    Comment("call using Construct builtin");
    Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
        isolate(), InterpreterPushArgsMode::kOther);
    var_result =
        CallStub(callable, context, args_count, args.base_reg_location(),
                 target, new_target, UndefinedConstant());
    Goto(&return_result);
  }

  BIND(&construct_array);
  {
    // TODO(bmeurer): Introduce a dedicated builtin to deal with the Array
    // constructor feedback collection inside of Ignition.
    Comment("call using ConstructArray builtin");
    Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
        isolate(), InterpreterPushArgsMode::kArrayFunction);
    var_result =
        CallStub(callable, context, args_count, args.base_reg_location(),
                 target, new_target, var_site.value());
    Goto(&return_result);
  }

  BIND(&return_result);
  return var_result.value();
}

TNode<Object> InterpreterAssembler::ConstructWithSpread(
    TNode<Object> target, TNode<Context> context, TNode<Object> new_target,
    const RegListNodePair& args, TNode<UintPtrT> slot_id,
    TNode<HeapObject> maybe_feedback_vector) {
  // TODO(bmeurer): Unify this with the Construct bytecode feedback
  // above once we have a way to pass the AllocationSite to the Array
  // constructor _and_ spread the last argument at the same time.
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  Label extra_checks(this, Label::kDeferred), construct(this);
  GotoIf(IsUndefined(maybe_feedback_vector), &construct);

  TNode<FeedbackVector> feedback_vector = CAST(maybe_feedback_vector);

  // Increment the call count.
  IncrementCallCount(feedback_vector, slot_id);

  // Check if we have monomorphic {new_target} feedback already.
  TNode<MaybeObject> feedback =
      LoadFeedbackVectorSlot(feedback_vector, slot_id);
  Branch(IsWeakReferenceToObject(feedback, new_target), &construct,
         &extra_checks);

  BIND(&extra_checks);
  {
    Label check_initialized(this), initialize(this), mark_megamorphic(this);

    // Check if it is a megamorphic {new_target}.
    Comment("check if megamorphic");
    TNode<BoolT> is_megamorphic = TaggedEqual(
        feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
    GotoIf(is_megamorphic, &construct);

    Comment("check if weak reference");
    GotoIfNot(IsWeakOrCleared(feedback), &check_initialized);

    // If the weak reference is cleared, we have a new chance to become
    // monomorphic.
    Comment("check if weak reference is cleared");
    Branch(IsCleared(feedback), &initialize, &mark_megamorphic);

    BIND(&check_initialized);
    {
      // Check if it is uninitialized.
      Comment("check if uninitialized");
      TNode<BoolT> is_uninitialized =
          TaggedEqual(feedback, UninitializedSymbolConstant());
      Branch(is_uninitialized, &initialize, &mark_megamorphic);
    }

    BIND(&initialize);
    {
      Comment("check if function in same native context");
      GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
      // Check if the {new_target} is a JSFunction or JSBoundFunction
      // in the current native context.
      TVARIABLE(HeapObject, var_current, CAST(new_target));
      Label loop(this, &var_current), done_loop(this);
      Goto(&loop);
      BIND(&loop);
      {
        Label if_boundfunction(this), if_function(this);
        TNode<HeapObject> current = var_current.value();
        TNode<Uint16T> current_instance_type = LoadInstanceType(current);
        GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
               &if_boundfunction);
        Branch(IsJSFunctionInstanceType(current_instance_type), &if_function,
               &mark_megamorphic);

        BIND(&if_function);
        {
          // Check that the JSFunction {current} is in the current native
          // context.
          TNode<Context> current_context =
              CAST(LoadObjectField(current, JSFunction::kContextOffset));
          TNode<NativeContext> current_native_context =
              LoadNativeContext(current_context);
          Branch(
              TaggedEqual(LoadNativeContext(context), current_native_context),
              &done_loop, &mark_megamorphic);
        }

        BIND(&if_boundfunction);
        {
          // Continue with the [[BoundTargetFunction]] of {current}.
          var_current = LoadObjectField<HeapObject>(
              current, JSBoundFunction::kBoundTargetFunctionOffset);
          Goto(&loop);
        }
      }
      BIND(&done_loop);
      StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
                                         CAST(new_target));
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:Initialize");
      Goto(&construct);
    }

    BIND(&mark_megamorphic);
    {
      // MegamorphicSentinel is an immortal immovable object so
      // write-barrier is not needed.
      Comment("transition to megamorphic");
      DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
      StoreFeedbackVectorSlot(
          feedback_vector, slot_id,
          HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:TransitionMegamorphic");
      Goto(&construct);
    }
  }

  BIND(&construct);
  Comment("call using ConstructWithSpread builtin");
  Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
      isolate(), InterpreterPushArgsMode::kWithFinalSpread);
  TNode<Word32T> args_count = JSParameterCount(args.reg_count());
  return CallStub(callable, context, args_count, args.base_reg_location(),
                  target, new_target, UndefinedConstant());
}

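// Computes the entry point of the runtime function identified by function_id
// via the runtime function table, then calls it through the InterpreterCEntry
// stub, passing the arguments from the register file.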
template <class T>
TNode<T> InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
                                            TNode<Context> context,
                                            const RegListNodePair& args,
                                            int return_count) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallRuntime(bytecode_));
  Callable callable = CodeFactory::InterpreterCEntry(isolate(), return_count);
  TNode<CodeT> code_target = HeapConstant(callable.code());

  // Get the function entry from the function id.
  TNode<RawPtrT> function_table = ReinterpretCast<RawPtrT>(ExternalConstant(
      ExternalReference::runtime_function_table_address(isolate())));
  TNode<Word32T> function_offset =
      Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
  TNode<WordT> function =
      IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
  TNode<RawPtrT> function_entry = Load<RawPtrT>(
      function, IntPtrConstant(offsetof(Runtime::Function, entry)));

  return CallStub<T>(callable.descriptor(), code_target, context,
                     args.reg_count(), args.base_reg_location(),
                     function_entry);
}

template V8_EXPORT_PRIVATE TNode<Object> InterpreterAssembler::CallRuntimeN(
    TNode<Uint32T> function_id, TNode<Context> context,
    const RegListNodePair& args, int return_count);
template V8_EXPORT_PRIVATE TNode<PairT<Object, Object>>
InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
                                   TNode<Context> context,
                                   const RegListNodePair& args,
                                   int return_count);

void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight,
                                                 bool backward) {
  Comment("[ UpdateInterruptBudget");

  // Assert that the weight is positive (negative weights should be implemented
  // as backward updates).
  CSA_DCHECK(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));

  Label load_budget_from_bytecode(this), load_budget_done(this);
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  TNode<FeedbackCell> feedback_cell =
      LoadObjectField<FeedbackCell>(function, JSFunction::kFeedbackCellOffset);
  TNode<Int32T> old_budget = LoadObjectField<Int32T>(
      feedback_cell, FeedbackCell::kInterruptBudgetOffset);

  // Make sure we include the current bytecode in the budget calculation.
  TNode<Int32T> budget_after_bytecode =
      Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize()));

  Label done(this);
  TVARIABLE(Int32T, new_budget);
  if (backward) {
    // Update budget by |weight| and check if it reaches zero.
    new_budget = Int32Sub(budget_after_bytecode, weight);
    TNode<BoolT> condition =
        Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
    Label ok(this), interrupt_check(this, Label::kDeferred);
    Branch(condition, &ok, &interrupt_check);

    BIND(&interrupt_check);
    // JumpLoop should do a stack check as part of the interrupt.
    CallRuntime(bytecode() == Bytecode::kJumpLoop
                    ? Runtime::kBytecodeBudgetInterruptWithStackCheck
                    : Runtime::kBytecodeBudgetInterrupt,
                GetContext(), function);
    Goto(&done);

    BIND(&ok);
  } else {
    // For a forward jump, we know we only increase the interrupt budget, so
    // no need to check if it's below zero.
    new_budget = Int32Add(budget_after_bytecode, weight);
  }

  // Update budget.
  StoreObjectFieldNoWriteBarrier(
      feedback_cell, FeedbackCell::kInterruptBudgetOffset, new_budget.value());
  Goto(&done);
  BIND(&done);
  Comment("] UpdateInterruptBudget");
}

TNode<IntPtrT> InterpreterAssembler::Advance() {
  return Advance(CurrentBytecodeSize());
}

TNode<IntPtrT> InterpreterAssembler::Advance(int delta) {
  return Advance(IntPtrConstant(delta));
}

TNode<IntPtrT> InterpreterAssembler::Advance(TNode<IntPtrT> delta,
                                             bool backward) {
#ifdef V8_TRACE_UNOPTIMIZED
  TraceBytecode(Runtime::kTraceUnoptimizedBytecodeExit);
#endif
  TNode<IntPtrT> next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
                                        : IntPtrAdd(BytecodeOffset(), delta);
  bytecode_offset_ = next_offset;
  return next_offset;
}

void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset, bool backward) {
  DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));

  UpdateInterruptBudget(TruncateIntPtrToInt32(jump_offset), backward);
  TNode<IntPtrT> new_bytecode_offset = Advance(jump_offset, backward);
  TNode<RawPtrT> target_bytecode =
      UncheckedCast<RawPtrT>(LoadBytecode(new_bytecode_offset));
  DispatchToBytecode(target_bytecode, new_bytecode_offset);
}

void InterpreterAssembler::Jump(TNode<IntPtrT> jump_offset) {
  Jump(jump_offset, false);
}

void InterpreterAssembler::JumpBackward(TNode<IntPtrT> jump_offset) {
  Jump(jump_offset, true);
}

void InterpreterAssembler::JumpConditional(TNode<BoolT> condition,
                                           TNode<IntPtrT> jump_offset) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  BIND(&match);
  Jump(jump_offset);
  BIND(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpConditionalByImmediateOperand(
    TNode<BoolT> condition, int operand_index) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  BIND(&match);
  TNode<IntPtrT> jump_offset = Signed(BytecodeOperandUImmWord(operand_index));
  Jump(jump_offset);
  BIND(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpConditionalByConstantOperand(
    TNode<BoolT> condition, int operand_index) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  BIND(&match);
  TNode<IntPtrT> jump_offset =
      LoadAndUntagConstantPoolEntryAtOperandIndex(operand_index);
  Jump(jump_offset);
  BIND(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs,
                                             TNode<Object> rhs,
                                             TNode<IntPtrT> jump_offset) {
  JumpConditional(TaggedEqual(lhs, rhs), jump_offset);
}

void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs,
                                             TNode<Object> rhs,
                                             int operand_index) {
  JumpConditionalByImmediateOperand(TaggedEqual(lhs, rhs), operand_index);
}

void InterpreterAssembler::JumpIfTaggedEqualConstant(TNode<Object> lhs,
                                                     TNode<Object> rhs,
                                                     int operand_index) {
  JumpConditionalByConstantOperand(TaggedEqual(lhs, rhs), operand_index);
}

void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs,
                                                TNode<Object> rhs,
                                                TNode<IntPtrT> jump_offset) {
  JumpConditional(TaggedNotEqual(lhs, rhs), jump_offset);
}

void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs,
                                                TNode<Object> rhs,
                                                int operand_index) {
  JumpConditionalByImmediateOperand(TaggedNotEqual(lhs, rhs), operand_index);
}

void InterpreterAssembler::JumpIfTaggedNotEqualConstant(TNode<Object> lhs,
                                                        TNode<Object> rhs,
                                                        int operand_index) {
  JumpConditionalByConstantOperand(TaggedNotEqual(lhs, rhs), operand_index);
}

TNode<WordT> InterpreterAssembler::LoadBytecode(
    TNode<IntPtrT> bytecode_offset) {
  TNode<Uint8T> bytecode =
      Load<Uint8T>(BytecodeArrayTaggedPointer(), bytecode_offset);
  return ChangeUint32ToWord(bytecode);
}

void InterpreterAssembler::StarDispatchLookahead(TNode<WordT> target_bytecode) {
  Label do_inline_star(this), done(this);

  // Check whether the following opcode is one of the short Star codes. All
  // opcodes higher than the short Star variants are invalid, and invalid
  // opcodes are never deliberately written, so we can use a one-sided check.
  // This is no less secure than the normal-length Star handler, which performs
  // no validation on its operand.
  STATIC_ASSERT(static_cast<int>(Bytecode::kLastShortStar) + 1 ==
                static_cast<int>(Bytecode::kIllegal));
  STATIC_ASSERT(Bytecode::kIllegal == Bytecode::kLast);
  TNode<Int32T> first_short_star_bytecode =
      Int32Constant(static_cast<int>(Bytecode::kFirstShortStar));
  TNode<BoolT> is_star = Uint32GreaterThanOrEqual(
      TruncateWordToInt32(target_bytecode), first_short_star_bytecode);
  Branch(is_star, &do_inline_star, &done);

  BIND(&do_inline_star);
  {
    InlineShortStar(target_bytecode);

    // Rather than merging control flow to a single indirect jump, we can get
    // better branch prediction by duplicating it. This is because the
    // instruction following a merged X + StarN is a bad predictor of the
    // instruction following a non-merged X, and vice versa.
    DispatchToBytecode(LoadBytecode(BytecodeOffset()), BytecodeOffset());
  }
  BIND(&done);
}

void InterpreterAssembler::InlineShortStar(TNode<WordT> target_bytecode) {
  Bytecode previous_bytecode = bytecode_;
  ImplicitRegisterUse previous_acc_use = implicit_register_use_;

  // At this point we don't know statically what bytecode we're executing, but
  // kStar0 has the right attributes (namely, no operands) for any of the short
  // Star codes.
  bytecode_ = Bytecode::kStar0;
  implicit_register_use_ = ImplicitRegisterUse::kNone;

#ifdef V8_TRACE_UNOPTIMIZED
  TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif

  StoreRegisterForShortStar(GetAccumulator(), target_bytecode);

  DCHECK_EQ(implicit_register_use_,
            Bytecodes::GetImplicitRegisterUse(bytecode_));

  Advance();
  bytecode_ = previous_bytecode;
  implicit_register_use_ = previous_acc_use;
}

void InterpreterAssembler::Dispatch() {
  Comment("========= Dispatch");
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  TNode<IntPtrT> target_offset = Advance();
  TNode<WordT> target_bytecode = LoadBytecode(target_offset);
  DispatchToBytecodeWithOptionalStarLookahead(target_bytecode);
}

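// If the current bytecode participates in Star lookahead, a trailing short
// Star is handled inline (StarDispatchLookahead dispatches itself in that
// case); otherwise control falls through to a plain table dispatch.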
void InterpreterAssembler::DispatchToBytecodeWithOptionalStarLookahead(
    TNode<WordT> target_bytecode) {
  if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
    StarDispatchLookahead(target_bytecode);
  }
  DispatchToBytecode(target_bytecode, BytecodeOffset());
}

void InterpreterAssembler::DispatchToBytecode(
    TNode<WordT> target_bytecode, TNode<IntPtrT> new_bytecode_offset) {
  if (V8_IGNITION_DISPATCH_COUNTING_BOOL) {
    TraceBytecodeDispatch(target_bytecode);
  }

  TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
      DispatchTablePointer(), TimesSystemPointerSize(target_bytecode));

  DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
}

void InterpreterAssembler::DispatchToBytecodeHandlerEntry(
    TNode<RawPtrT> handler_entry, TNode<IntPtrT> bytecode_offset) {
  TailCallBytecodeDispatch(
      InterpreterDispatchDescriptor{}, handler_entry, GetAccumulatorUnchecked(),
      bytecode_offset, BytecodeArrayTaggedPointer(), DispatchTablePointer());
}

void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
  // Dispatching a wide bytecode requires treating the prefix
  // bytecode as a base pointer into the dispatch table and dispatching
  // the bytecode that follows relative to this base.
1261 //
1262 // Indices 0-255 correspond to bytecodes with operand_scale == 0
1263 // Indices 256-511 correspond to bytecodes with operand_scale == 1
1264 // Indices 512-767 correspond to bytecodes with operand_scale == 2
1265 DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
1266 TNode<IntPtrT> next_bytecode_offset = Advance(1);
1267 TNode<WordT> next_bytecode = LoadBytecode(next_bytecode_offset);
1268
1269 if (V8_IGNITION_DISPATCH_COUNTING_BOOL) {
1270 TraceBytecodeDispatch(next_bytecode);
1271 }
1272
1273 TNode<IntPtrT> base_index;
1274 switch (operand_scale) {
1275 case OperandScale::kDouble:
1276 base_index = IntPtrConstant(1 << kBitsPerByte);
1277 break;
1278 case OperandScale::kQuadruple:
1279 base_index = IntPtrConstant(2 << kBitsPerByte);
1280 break;
1281 default:
1282 UNREACHABLE();
1283 }
1284 TNode<WordT> target_index = IntPtrAdd(base_index, next_bytecode);
1285 TNode<RawPtrT> target_code_entry = Load<RawPtrT>(
1286 DispatchTablePointer(), TimesSystemPointerSize(target_index));
1287
1288 DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
1289 }
1290
UpdateInterruptBudgetOnReturn()1291 void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
1292 // TODO(rmcilroy): Investigate whether it is worth supporting self
1293 // optimization of primitive functions like FullCodegen.
1294
1295 // Update profiling count by the number of bytes between the end of the
1296 // current bytecode and the start of the first one, to simulate backedge to
1297 // start of function.
1298 //
1299 // With headers and current offset, the bytecode array layout looks like:
1300 //
1301 // <---------- simulated backedge ----------
1302 // | header | first bytecode | .... | return bytecode |
1303 // |<------ current offset ------->
1304 // ^ tagged bytecode array pointer
1305 //
1306 // UpdateInterruptBudget already handles adding the bytecode size to the
1307 // length of the back-edge, so we just have to correct for the non-zero offset
1308 // of the first bytecode.

  TNode<Int32T> profiling_weight =
      Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
               Int32Constant(kFirstBytecodeOffset));
  UpdateInterruptBudget(profiling_weight, true);
}

TNode<Int16T> InterpreterAssembler::LoadOsrUrgencyAndInstallTarget() {
  // We're loading a 16-bit field, mask it.
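  // The field packs the OSR urgency together with the OSR install target; the
  // 0xFFFF mask ensures the upper bits of the 32-bit word are zero.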
  return UncheckedCast<Int16T>(Word32And(
      LoadObjectField<Int16T>(BytecodeArrayTaggedPointer(),
                              BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
      0xFFFF));
}

void InterpreterAssembler::Abort(AbortReason abort_reason) {
  TNode<Smi> abort_id = SmiConstant(abort_reason);
  CallRuntime(Runtime::kAbort, GetContext(), abort_id);
}

void InterpreterAssembler::AbortIfWordNotEqual(TNode<WordT> lhs,
                                               TNode<WordT> rhs,
                                               AbortReason abort_reason) {
  Label ok(this), abort(this, Label::kDeferred);
  Branch(WordEqual(lhs, rhs), &ok, &abort);

  BIND(&abort);
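  // Runtime::kAbort never returns at run time; the Goto below merely gives
  // this block a terminator so the CSA graph stays well-formed.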
  Abort(abort_reason);
  Goto(&ok);

  BIND(&ok);
}

void InterpreterAssembler::OnStackReplacement(TNode<Context> context,
                                              TNode<IntPtrT> relative_jump) {
  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
  TNode<HeapObject> shared_info = LoadJSFunctionSharedFunctionInfo(function);
  TNode<Object> sfi_data =
      LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
  TNode<Uint16T> data_type = LoadInstanceType(CAST(sfi_data));

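  // If the SharedFunctionInfo's data slot already holds a Code object
  // (CODET_TYPE), baseline code exists for this function and we OSR into
  // baseline rather than into optimized code.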
  Label baseline(this);
  GotoIf(InstanceTypeEqual(data_type, CODET_TYPE), &baseline);
  {
    Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
    CallStub(callable, context);
    JumpBackward(relative_jump);
  }

  BIND(&baseline);
  {
    Callable callable =
        CodeFactory::InterpreterOnStackReplacement_ToBaseline(isolate());
    // We already compiled the baseline code, so we don't need to handle failed
    // compilation as in the Ignition -> Turbofan case. Therefore we can just
    // tailcall to the OSR builtin.
    SaveBytecodeOffset();
    TailCallStub(callable, context);
  }
}

void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
  CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
              SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
}

void InterpreterAssembler::TraceBytecodeDispatch(TNode<WordT> target_bytecode) {
  TNode<ExternalReference> counters_table = ExternalConstant(
      ExternalReference::interpreter_dispatch_counters(isolate()));
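  // The dispatch counters form a (kLast + 1) x (kLast + 1) matrix, flattened
  // row-major: the row is the currently executing (source) bytecode, the
  // column is the bytecode being dispatched to.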
  TNode<IntPtrT> source_bytecode_table_index = IntPtrConstant(
      static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));

  TNode<WordT> counter_offset = TimesSystemPointerSize(
      IntPtrAdd(source_bytecode_table_index, target_bytecode));
  TNode<IntPtrT> old_counter = Load<IntPtrT>(counters_table, counter_offset);

  Label counter_ok(this), counter_saturated(this, Label::kDeferred);

  TNode<BoolT> counter_reached_max = WordEqual(
      old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
  Branch(counter_reached_max, &counter_saturated, &counter_ok);

  BIND(&counter_ok);
  {
    TNode<IntPtrT> new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
    StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
                        counter_offset, new_counter);
    Goto(&counter_saturated);
  }

  BIND(&counter_saturated);
}

// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
  return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC ||   \
    V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
  return true;
#else
#error "Unknown Architecture"
#endif
}

void InterpreterAssembler::AbortIfRegisterCountInvalid(
    TNode<FixedArrayBase> parameters_and_registers,
    TNode<IntPtrT> formal_parameter_count, TNode<UintPtrT> register_count) {
  TNode<IntPtrT> array_size =
      LoadAndUntagFixedArrayBaseLength(parameters_and_registers);

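  // The generator's parameters_and_registers array must be large enough to
  // hold every formal parameter followed by every register in the register
  // file.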
  Label ok(this), abort(this, Label::kDeferred);
  Branch(UintPtrLessThanOrEqual(
             IntPtrAdd(formal_parameter_count, register_count), array_size),
         &ok, &abort);

  BIND(&abort);
  Abort(AbortReason::kInvalidParametersAndRegistersInGenerator);
  Goto(&ok);

  BIND(&ok);
}

TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
    TNode<FixedArray> array, const RegListNodePair& registers,
    TNode<Int32T> formal_parameter_count) {
  // Store the formal parameters (without receiver) followed by the
  // registers into the generator's internal parameters_and_registers field.
  TNode<IntPtrT> formal_parameter_count_intptr =
      Signed(ChangeUint32ToWord(formal_parameter_count));
  TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
  if (FLAG_debug_code) {
    CSA_DCHECK(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
                                register_count);
  }

  {
    TVARIABLE(IntPtrT, var_index);
    var_index = IntPtrConstant(0);

    // Iterate over parameters and write them into the array.
    Label loop(this, &var_index), done_loop(this);

    TNode<IntPtrT> reg_base =
        IntPtrConstant(Register::FromParameterIndex(0).ToOperand() + 1);
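    // Note: parameters are read from consecutive operands starting at
    // reg_base; the + 1 above is what skips the receiver, which is not
    // exported (see the comment at the top of the function).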

    Goto(&loop);
    BIND(&loop);
    {
      TNode<IntPtrT> index = var_index.value();
      GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
                &done_loop);

      TNode<IntPtrT> reg_index = IntPtrAdd(reg_base, index);
      TNode<Object> value = LoadRegister(reg_index);

      StoreFixedArrayElement(array, index, value);

      var_index = IntPtrAdd(index, IntPtrConstant(1));
      Goto(&loop);
    }
    BIND(&done_loop);
  }

  {
    // Iterate over register file and write values into array.
    // The mapping of register to array index must match that used in
    // BytecodeGraphBuilder::VisitResumeGenerator.
    TVARIABLE(IntPtrT, var_index);
    var_index = IntPtrConstant(0);

    Label loop(this, &var_index), done_loop(this);
    Goto(&loop);
    BIND(&loop);
    {
      TNode<IntPtrT> index = var_index.value();
      GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

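      // Register i of the register file lives at operand
      // Register(0).ToOperand() - i, i.e. the register file grows downward
      // in the frame.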
      TNode<IntPtrT> reg_index =
          IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
      TNode<Object> value = LoadRegister(reg_index);

      TNode<IntPtrT> array_index =
          IntPtrAdd(formal_parameter_count_intptr, index);
      StoreFixedArrayElement(array, array_index, value);

      var_index = IntPtrAdd(index, IntPtrConstant(1));
      Goto(&loop);
    }
    BIND(&done_loop);
  }

  return array;
}

TNode<FixedArray> InterpreterAssembler::ImportRegisterFile(
    TNode<FixedArray> array, const RegListNodePair& registers,
    TNode<Int32T> formal_parameter_count) {
  TNode<IntPtrT> formal_parameter_count_intptr =
      Signed(ChangeUint32ToWord(formal_parameter_count));
  TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
  if (FLAG_debug_code) {
    CSA_DCHECK(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
                                register_count);
  }

  TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));

  // Iterate over the array and write its values into the register file. Also
  // erase the array contents so they are not artificially kept alive.
  Label loop(this, &var_index), done_loop(this);
  Goto(&loop);
  BIND(&loop);
  {
    TNode<IntPtrT> index = var_index.value();
    GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

    TNode<IntPtrT> array_index =
        IntPtrAdd(formal_parameter_count_intptr, index);
    TNode<Object> value = LoadFixedArrayElement(array, array_index);

    TNode<IntPtrT> reg_index =
        IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
    StoreRegister(value, reg_index);

    StoreFixedArrayElement(array, array_index, StaleRegisterConstant());

    var_index = IntPtrAdd(index, IntPtrConstant(1));
    Goto(&loop);
  }
  BIND(&done_loop);

  return array;
}

int InterpreterAssembler::CurrentBytecodeSize() const {
  return Bytecodes::Size(bytecode_, operand_scale_);
}

void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
  TNode<Object> object = GetAccumulator();
  TNode<Context> context = GetContext();

  TVARIABLE(Smi, var_type_feedback);
  TVARIABLE(Numeric, var_result);
  Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this),
      if_objectisother(this, Label::kDeferred);

  GotoIf(TaggedIsSmi(object), &if_objectissmi);
  Branch(IsHeapNumber(CAST(object)), &if_objectisheapnumber, &if_objectisother);

  BIND(&if_objectissmi);
  {
    var_result = CAST(object);
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
    Goto(&if_done);
  }

  BIND(&if_objectisheapnumber);
  {
    var_result = CAST(object);
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
    Goto(&if_done);
  }

  BIND(&if_objectisother);
  {
    auto builtin = Builtin::kNonNumberToNumber;
    if (mode == Object::Conversion::kToNumeric) {
      builtin = Builtin::kNonNumberToNumeric;
      // Special case for collecting BigInt feedback.
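      // (NonNumberToNumeric would return the BigInt unchanged anyway, but
      // without this fast path the feedback recorded below would be the less
      // precise kAny.)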
      Label not_bigint(this);
      GotoIfNot(IsBigInt(CAST(object)), &not_bigint);
      {
        var_result = CAST(object);
        var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
        Goto(&if_done);
      }
      BIND(&not_bigint);
    }

    // Convert {object} by calling out to the appropriate builtin.
    var_result = CAST(CallBuiltin(builtin, context, object));
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
    Goto(&if_done);
  }

  BIND(&if_done);

  // Record the type feedback collected for {object}.
  TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
  TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();

  MaybeUpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
                      slot_index);

  SetAccumulator(var_result.value());
  Dispatch();
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8