1 // Copyright 2021 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef V8_MAGLEV_MAGLEV_IR_H_
6 #define V8_MAGLEV_MAGLEV_IR_H_
7
#include <utility>

#include "src/base/bit-field.h"
#include "src/base/macros.h"
#include "src/base/small-vector.h"
#include "src/base/threaded-list.h"
#include "src/codegen/label.h"
#include "src/codegen/reglist.h"
#include "src/common/globals.h"
#include "src/common/operation.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/heap-refs.h"
#include "src/interpreter/bytecode-register.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/objects/smi.h"
#include "src/roots/roots.h"
#include "src/utils/utils.h"
#include "src/zone/zone.h"
24
25 namespace v8 {
26 namespace internal {
27 namespace maglev {
28
29 class BasicBlock;
30 class ProcessingState;
31 class MaglevCodeGenState;
32 class MaglevCompilationUnit;
33 class MaglevGraphLabeller;
34 class MaglevVregAllocationState;
35 class CompactInterpreterFrameState;
36
37 // Nodes are either
38 // 1. side-effecting or value-holding SSA nodes in the body of basic blocks, or
39 // 2. Control nodes that store the control flow at the end of basic blocks, and
40 // form a separate node hierarchy to non-control nodes.
41 //
42 // The macro lists below must match the node class hierarchy.
43
// Feedback-driven generic operations; each entry expands to a Generic##Name
// node class (via DEF_OPERATION_NODE at the bottom of this file). Note:
// each macro list below must expand to a contiguous run of opcodes, since
// the kFirst*/kLast* range checks further down rely on that.
#define GENERIC_OPERATIONS_NODE_LIST(V) \
  V(GenericAdd)                         \
  V(GenericSubtract)                    \
  V(GenericMultiply)                    \
  V(GenericDivide)                      \
  V(GenericModulus)                     \
  V(GenericExponentiate)                \
  V(GenericBitwiseAnd)                  \
  V(GenericBitwiseOr)                   \
  V(GenericBitwiseXor)                  \
  V(GenericShiftLeft)                   \
  V(GenericShiftRight)                  \
  V(GenericShiftRightLogical)           \
  V(GenericBitwiseNot)                  \
  V(GenericNegate)                      \
  V(GenericIncrement)                   \
  V(GenericDecrement)                   \
  V(GenericEqual)                       \
  V(GenericStrictEqual)                 \
  V(GenericLessThan)                    \
  V(GenericLessThanOrEqual)             \
  V(GenericGreaterThan)                 \
  V(GenericGreaterThanOrEqual)

// Nodes that produce a value (the ValueNode sub-hierarchy).
#define VALUE_NODE_LIST(V) \
  V(Call)                  \
  V(Constant)              \
  V(InitialValue)          \
  V(LoadField)             \
  V(LoadGlobal)            \
  V(LoadNamedGeneric)      \
  V(Phi)                   \
  V(RegisterInput)         \
  V(RootConstant)          \
  V(SmiConstant)           \
  V(CheckedSmiTag)         \
  V(CheckedSmiUntag)       \
  V(Int32AddWithOverflow)  \
  V(Int32Constant)         \
  GENERIC_OPERATIONS_NODE_LIST(V)

// All non-control nodes: effect-only nodes plus every value node.
#define NODE_LIST(V) \
  V(CheckMaps)       \
  V(GapMove)         \
  V(StoreField)      \
  VALUE_NODE_LIST(V)

// Block terminators with more than one successor.
#define CONDITIONAL_CONTROL_NODE_LIST(V) \
  V(BranchIfTrue)                        \
  V(BranchIfToBooleanTrue)

// Block terminators with exactly one successor.
#define UNCONDITIONAL_CONTROL_NODE_LIST(V) \
  V(Jump)                                  \
  V(JumpLoop)

// All block-terminating control nodes.
#define CONTROL_NODE_LIST(V)       \
  V(Return)                        \
  V(Deopt)                         \
  CONDITIONAL_CONTROL_NODE_LIST(V) \
  UNCONDITIONAL_CONTROL_NODE_LIST(V)

// Every node in the NodeBase hierarchy.
#define NODE_BASE_LIST(V) \
  NODE_LIST(V)            \
  CONTROL_NODE_LIST(V)
108
// Define the opcode enum: one k##Name entry per node class, in macro-list
// order, so each sub-list above covers a contiguous range of enum values.
#define DEF_OPCODES(type) k##type,
enum class Opcode : uint8_t { NODE_BASE_LIST(DEF_OPCODES) };
#undef DEF_OPCODES
// Count the opcodes by expanding every list entry to "+1".
#define PLUS_ONE(type) +1
static constexpr int kOpcodeCount = NODE_BASE_LIST(PLUS_ONE);
static constexpr Opcode kFirstOpcode = static_cast<Opcode>(0);
static constexpr Opcode kLastOpcode = static_cast<Opcode>(kOpcodeCount - 1);
#undef PLUS_ONE

// Human-readable opcode name, for tracing and graph printing.
const char* ToString(Opcode opcode);
inline std::ostream& operator<<(std::ostream& os, Opcode opcode) {
  return os << ToString(opcode);
}
123
// First/last opcode of each sub-hierarchy, computed as min/max over the
// corresponding macro list. Each list expansion ends with a trailing comma,
// so the braced initializer is completed with a sentinel from the opposite
// extreme (kLastOpcode inside std::min, kFirstOpcode inside std::max),
// which can never affect the result.
#define V(Name) Opcode::k##Name,
static constexpr Opcode kFirstValueNodeOpcode =
    std::min({VALUE_NODE_LIST(V) kLastOpcode});
static constexpr Opcode kLastValueNodeOpcode =
    std::max({VALUE_NODE_LIST(V) kFirstOpcode});

static constexpr Opcode kFirstNodeOpcode = std::min({NODE_LIST(V) kLastOpcode});
static constexpr Opcode kLastNodeOpcode = std::max({NODE_LIST(V) kFirstOpcode});

static constexpr Opcode kFirstConditionalControlNodeOpcode =
    std::min({CONDITIONAL_CONTROL_NODE_LIST(V) kLastOpcode});
static constexpr Opcode kLastConditionalControlNodeOpcode =
    std::max({CONDITIONAL_CONTROL_NODE_LIST(V) kFirstOpcode});

static constexpr Opcode kLastUnconditionalControlNodeOpcode =
    std::max({UNCONDITIONAL_CONTROL_NODE_LIST(V) kFirstOpcode});
static constexpr Opcode kFirstUnconditionalControlNodeOpcode =
    std::min({UNCONDITIONAL_CONTROL_NODE_LIST(V) kLastOpcode});

static constexpr Opcode kFirstControlNodeOpcode =
    std::min({CONTROL_NODE_LIST(V) kLastOpcode});
static constexpr Opcode kLastControlNodeOpcode =
    std::max({CONTROL_NODE_LIST(V) kFirstOpcode});
#undef V
148
IsValueNode(Opcode opcode)149 constexpr bool IsValueNode(Opcode opcode) {
150 return kFirstValueNodeOpcode <= opcode && opcode <= kLastValueNodeOpcode;
151 }
IsConditionalControlNode(Opcode opcode)152 constexpr bool IsConditionalControlNode(Opcode opcode) {
153 return kFirstConditionalControlNodeOpcode <= opcode &&
154 opcode <= kLastConditionalControlNodeOpcode;
155 }
IsUnconditionalControlNode(Opcode opcode)156 constexpr bool IsUnconditionalControlNode(Opcode opcode) {
157 return kFirstUnconditionalControlNodeOpcode <= opcode &&
158 opcode <= kLastUnconditionalControlNodeOpcode;
159 }
160
// Forward-declare NodeBase sub-hierarchies.
class Node;
class ControlNode;
class ConditionalControlNode;
class UnconditionalControlNode;
class ValueNode;

// Machine-level representation of a node's result value.
enum class ValueRepresentation {
  kTagged,
  kUntagged,
};

// Forward-declare every concrete node class.
#define DEF_FORWARD_DECLARATION(type, ...) class type;
NODE_BASE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION

// Node ids are dense integers assigned during graph processing; 0 means
// "not yet assigned" (see NodeBase::set_id).
using NodeIdT = uint32_t;
static constexpr uint32_t kInvalidNodeId = 0;
179
180 class OpProperties {
181 public:
is_call()182 constexpr bool is_call() const { return kIsCallBit::decode(bitfield_); }
can_eager_deopt()183 constexpr bool can_eager_deopt() const {
184 return kCanEagerDeoptBit::decode(bitfield_);
185 }
can_lazy_deopt()186 constexpr bool can_lazy_deopt() const {
187 return kCanLazyDeoptBit::decode(bitfield_);
188 }
can_read()189 constexpr bool can_read() const { return kCanReadBit::decode(bitfield_); }
can_write()190 constexpr bool can_write() const { return kCanWriteBit::decode(bitfield_); }
non_memory_side_effects()191 constexpr bool non_memory_side_effects() const {
192 return kNonMemorySideEffectsBit::decode(bitfield_);
193 }
is_untagged_value()194 constexpr bool is_untagged_value() const {
195 return kUntaggedValueBit::decode(bitfield_);
196 }
197
is_pure()198 constexpr bool is_pure() const {
199 return (bitfield_ | kPureMask) == kPureValue;
200 }
is_required_when_unused()201 constexpr bool is_required_when_unused() const {
202 return can_write() || non_memory_side_effects();
203 }
204
205 constexpr OpProperties operator|(const OpProperties& that) {
206 return OpProperties(bitfield_ | that.bitfield_);
207 }
208
Pure()209 static constexpr OpProperties Pure() { return OpProperties(kPureValue); }
Call()210 static constexpr OpProperties Call() {
211 return OpProperties(kIsCallBit::encode(true));
212 }
EagerDeopt()213 static constexpr OpProperties EagerDeopt() {
214 return OpProperties(kCanEagerDeoptBit::encode(true));
215 }
LazyDeopt()216 static constexpr OpProperties LazyDeopt() {
217 return OpProperties(kCanLazyDeoptBit::encode(true));
218 }
Reading()219 static constexpr OpProperties Reading() {
220 return OpProperties(kCanReadBit::encode(true));
221 }
Writing()222 static constexpr OpProperties Writing() {
223 return OpProperties(kCanWriteBit::encode(true));
224 }
NonMemorySideEffects()225 static constexpr OpProperties NonMemorySideEffects() {
226 return OpProperties(kNonMemorySideEffectsBit::encode(true));
227 }
UntaggedValue()228 static constexpr OpProperties UntaggedValue() {
229 return OpProperties(kUntaggedValueBit::encode(true));
230 }
JSCall()231 static constexpr OpProperties JSCall() {
232 return Call() | NonMemorySideEffects() | LazyDeopt();
233 }
AnySideEffects()234 static constexpr OpProperties AnySideEffects() {
235 return Reading() | Writing() | NonMemorySideEffects();
236 }
237
OpProperties(uint32_t bitfield)238 constexpr explicit OpProperties(uint32_t bitfield) : bitfield_(bitfield) {}
uint32_t()239 operator uint32_t() const { return bitfield_; }
240
241 private:
242 using kIsCallBit = base::BitField<bool, 0, 1>;
243 using kCanEagerDeoptBit = kIsCallBit::Next<bool, 1>;
244 using kCanLazyDeoptBit = kCanEagerDeoptBit::Next<bool, 1>;
245 using kCanReadBit = kCanLazyDeoptBit::Next<bool, 1>;
246 using kCanWriteBit = kCanReadBit::Next<bool, 1>;
247 using kNonMemorySideEffectsBit = kCanWriteBit::Next<bool, 1>;
248 using kUntaggedValueBit = kNonMemorySideEffectsBit::Next<bool, 1>;
249
250 static const uint32_t kPureMask = kCanReadBit::kMask | kCanWriteBit::kMask |
251 kNonMemorySideEffectsBit::kMask;
252 static const uint32_t kPureValue = kCanReadBit::encode(false) |
253 kCanWriteBit::encode(false) |
254 kNonMemorySideEffectsBit::encode(false);
255
256 const uint32_t bitfield_;
257
258 public:
259 static const size_t kSize = kUntaggedValueBit::kLastUsedBit + 1;
260 };
261
262 class ValueLocation {
263 public:
264 ValueLocation() = default;
265
266 template <typename... Args>
SetUnallocated(Args &&...args)267 void SetUnallocated(Args&&... args) {
268 DCHECK(operand_.IsInvalid());
269 operand_ = compiler::UnallocatedOperand(args...);
270 }
271
272 template <typename... Args>
SetAllocated(Args &&...args)273 void SetAllocated(Args&&... args) {
274 DCHECK(operand_.IsUnallocated());
275 operand_ = compiler::AllocatedOperand(args...);
276 }
277
278 // Only to be used on inputs that inherit allocation.
279 template <typename... Args>
InjectAllocated(Args &&...args)280 void InjectAllocated(Args&&... args) {
281 operand_ = compiler::AllocatedOperand(args...);
282 }
283
284 template <typename... Args>
SetConstant(Args &&...args)285 void SetConstant(Args&&... args) {
286 DCHECK(operand_.IsUnallocated());
287 operand_ = compiler::ConstantOperand(args...);
288 }
289
AssignedRegister()290 Register AssignedRegister() const {
291 return Register::from_code(
292 compiler::AllocatedOperand::cast(operand_).register_code());
293 }
294
operand()295 const compiler::InstructionOperand& operand() const { return operand_; }
operand()296 const compiler::InstructionOperand& operand() { return operand_; }
297
298 private:
299 compiler::InstructionOperand operand_;
300 };
301
// A ValueLocation that additionally carries a link in the per-value use
// list: uses of the same ValueNode are threaded together through these
// next_use_id_ slots (see ValueNode::mark_use).
class InputLocation : public ValueLocation {
 public:
  // Id of the next node using the same value, or kInvalidNodeId if this is
  // currently the last use.
  NodeIdT next_use_id() const { return next_use_id_; }
  // Used in ValueNode::mark_use
  NodeIdT* get_next_use_id_address() { return &next_use_id_; }

 private:
  NodeIdT next_use_id_ = kInvalidNodeId;
};
311
// An input edge of a node: the used ValueNode plus this use's location and
// use-list link (inherited from InputLocation).
class Input : public InputLocation {
 public:
  explicit Input(ValueNode* node) : node_(node) {}
  ValueNode* node() const { return node_; }

 private:
  ValueNode* node_;
};
320
// A snapshot of the interpreter frame at a given bytecode offset; used to
// reconstruct the unoptimized frame when deoptimizing.
class CheckpointedInterpreterState {
 public:
  CheckpointedInterpreterState() = default;
  CheckpointedInterpreterState(BytecodeOffset bytecode_position,
                               const CompactInterpreterFrameState* state)
      : bytecode_position(bytecode_position), register_frame(state) {}

  // Bytecode offset of the checkpoint (None when default-constructed).
  BytecodeOffset bytecode_position = BytecodeOffset::None();
  // Interpreter register contents at the checkpoint.
  const CompactInterpreterFrameState* register_frame = nullptr;
};
331
// Per-deopt data shared by eager and lazy deopts. The constructor is
// defined out of line (in the .cc file).
class DeoptInfo {
 protected:
  DeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit,
            CheckpointedInterpreterState checkpoint);

 public:
  CheckpointedInterpreterState state;
  // NOTE(review): presumably one location per checkpointed value, filled in
  // during register allocation — confirm against maglev-regalloc.
  InputLocation* input_locations = nullptr;
  // Label for the deopt exit; -1 index until assigned by code generation.
  Label deopt_entry_label;
  int deopt_index = -1;
};
343
// DeoptInfo for nodes that deopt at the node itself (see
// OpProperties::can_eager_deopt). No extra fields beyond DeoptInfo.
class EagerDeoptInfo : public DeoptInfo {
 public:
  EagerDeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit,
                 CheckpointedInterpreterState checkpoint)
      : DeoptInfo(zone, compilation_unit, checkpoint) {}
};
350
// DeoptInfo for lazy deopts (see OpProperties::can_lazy_deopt); also
// records the deopting call's return pc and which interpreter register
// receives the call's result.
class LazyDeoptInfo : public DeoptInfo {
 public:
  LazyDeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit,
                CheckpointedInterpreterState checkpoint)
      : DeoptInfo(zone, compilation_unit, checkpoint) {}

  // -1 until set during code generation.
  int deopting_call_return_pc = -1;
  interpreter::Register result_location =
      interpreter::Register::invalid_value();
};
361
// Dummy type for the initial raw allocation (see NodeBase::Allocate, which
// carves inputs + deopt info + node out of one zone block).
struct NodeWithInlineInputs {};

namespace detail {
// Helper for getting the static opcode of a Node subclass. This is in a
// "detail" namespace rather than in NodeBase because we can't template
// specialize outside of namespace scopes before C++17.
template <class T>
struct opcode_of_helper;

// One specialization per concrete node class, mapping type -> Opcode.
#define DEF_OPCODE_OF(Name)                          \
  template <>                                        \
  struct opcode_of_helper<Name> {                    \
    static constexpr Opcode value = Opcode::k##Name; \
  };
NODE_BASE_LIST(DEF_OPCODE_OF)
#undef DEF_OPCODE_OF

}  // namespace detail
381
// Base class of every IR node (both the Node and ControlNode hierarchies).
//
// Memory layout: a node and its inputs (and, if it can deopt, its deopt
// info) share ONE zone allocation. The inputs are stored immediately
// *before* the node object, in reverse index order, and the deopt info (if
// any) before the inputs:
//
//   [EagerDeoptInfo/LazyDeoptInfo][Input n-1]...[Input 0][NodeBase ...]
//
// See Allocate(), input_address() and the *_deopt_info accessors.
class NodeBase : public ZoneObject {
 private:
  // Bitfield specification.
  using OpcodeField = base::BitField<Opcode, 0, 6>;
  STATIC_ASSERT(OpcodeField::is_valid(kLastOpcode));
  using OpPropertiesField =
      OpcodeField::Next<OpProperties, OpProperties::kSize>;
  using InputCountField = OpPropertiesField::Next<uint16_t, 16>;

 protected:
  // Subclasses may use the remaining bitfield bits.
  template <class T, int size>
  using NextBitField = InputCountField::Next<T, size>;

  // Static opcode of a concrete node class, e.g. opcode_of<Call>.
  template <class T>
  static constexpr Opcode opcode_of = detail::opcode_of_helper<T>::value;

 public:
  // Creates a node whose inputs are given up front.
  template <class Derived, typename... Args>
  static Derived* New(Zone* zone, std::initializer_list<ValueNode*> inputs,
                      Args&&... args) {
    Derived* node =
        Allocate<Derived>(zone, inputs.size(), std::forward<Args>(args)...);

    int i = 0;
    for (ValueNode* input : inputs) {
      DCHECK_NOT_NULL(input);
      node->set_input(i++, input);
    }

    return node;
  }

  // Creates a deoptable node: additionally placement-news its Eager- or
  // LazyDeoptInfo (chosen from Derived::kProperties) into the pre-node
  // storage, built from the given checkpoint.
  template <class Derived, typename... Args>
  static Derived* New(Zone* zone, const MaglevCompilationUnit& compilation_unit,
                      CheckpointedInterpreterState checkpoint, Args&&... args) {
    Derived* node = New<Derived>(zone, std::forward<Args>(args)...);
    if constexpr (Derived::kProperties.can_eager_deopt()) {
      new (node->eager_deopt_info_address())
          EagerDeoptInfo(zone, compilation_unit, checkpoint);
    } else {
      STATIC_ASSERT(Derived::kProperties.can_lazy_deopt());
      new (node->lazy_deopt_info_address())
          LazyDeoptInfo(zone, compilation_unit, checkpoint);
    }
    return node;
  }

  // Inputs must be initialized manually.
  template <class Derived, typename... Args>
  static Derived* New(Zone* zone, size_t input_count, Args&&... args) {
    Derived* node =
        Allocate<Derived>(zone, input_count, std::forward<Args>(args)...);
    return node;
  }

  // Overwritten by subclasses.
  static constexpr OpProperties kProperties = OpProperties::Pure();

  constexpr Opcode opcode() const { return OpcodeField::decode(bit_field_); }
  OpProperties properties() const {
    return OpPropertiesField::decode(bit_field_);
  }

  // Opcode-based type check; specialized below the class for the abstract
  // sub-hierarchies (ValueNode etc.), which match an opcode range.
  template <class T>
  constexpr bool Is() const;

  // Checked downcast (DCHECKs the opcode).
  template <class T>
  constexpr T* Cast() {
    DCHECK(Is<T>());
    return static_cast<T*>(this);
  }
  template <class T>
  constexpr const T* Cast() const {
    DCHECK(Is<T>());
    return static_cast<const T*>(this);
  }
  // Downcast returning nullptr on opcode mismatch.
  template <class T>
  constexpr T* TryCast() {
    return Is<T>() ? static_cast<T*>(this) : nullptr;
  }

  constexpr bool has_inputs() const { return input_count() > 0; }
  constexpr uint16_t input_count() const {
    return InputCountField::decode(bit_field_);
  }

  Input& input(int index) { return *input_address(index); }
  const Input& input(int index) const { return *input_address(index); }

  // Input iterators, use like:
  //
  //  for (Input& input : *node) { ... }
  //
  // Inputs are stored at descending addresses, so reverse iterators yield
  // them in ascending index order; input_address(-1) is one-past-the-last
  // in address terms (i.e. `this`).
  auto begin() { return std::make_reverse_iterator(input_address(-1)); }
  auto end() {
    return std::make_reverse_iterator(input_address(input_count() - 1));
  }

  // Scheduling id; must be set exactly once, to a non-invalid value, before
  // being read.
  constexpr NodeIdT id() const {
    DCHECK_NE(id_, kInvalidNodeId);
    return id_;
  }
  void set_id(NodeIdT id) {
    DCHECK_EQ(id_, kInvalidNodeId);
    DCHECK_NE(id, kInvalidNodeId);
    id_ = id;
  }

  int num_temporaries_needed() const {
#ifdef DEBUG
    if (kTemporariesState == kUnset) {
      DCHECK_EQ(num_temporaries_needed_, 0);
    } else {
      DCHECK_EQ(kTemporariesState, kNeedsTemporaries);
    }
#endif  // DEBUG
    return num_temporaries_needed_;
  }

  RegList temporaries() const {
    DCHECK_EQ(kTemporariesState, kHasTemporaries);
    return temporaries_;
  }

  // Switches the temporaries union from "count requested" to "registers
  // assigned". After this, temporaries() is valid and
  // num_temporaries_needed() is not.
  void assign_temporaries(RegList list) {
#ifdef DEBUG
    if (kTemporariesState == kUnset) {
      DCHECK_EQ(num_temporaries_needed_, 0);
    } else {
      DCHECK_EQ(kTemporariesState, kNeedsTemporaries);
    }
    kTemporariesState = kHasTemporaries;
#endif  // DEBUG
    temporaries_ = list;
  }

  void Print(std::ostream& os, MaglevGraphLabeller*) const;

  // Deopt info accessors: the info object sits directly before the last
  // input in memory. A node has at most one of eager/lazy deopt info (see
  // the static_assert in Allocate).
  EagerDeoptInfo* eager_deopt_info() {
    DCHECK(properties().can_eager_deopt());
    DCHECK(!properties().can_lazy_deopt());
    return (
        reinterpret_cast<EagerDeoptInfo*>(input_address(input_count() - 1)) -
        1);
  }

  const EagerDeoptInfo* eager_deopt_info() const {
    DCHECK(properties().can_eager_deopt());
    DCHECK(!properties().can_lazy_deopt());
    return (reinterpret_cast<const EagerDeoptInfo*>(
                input_address(input_count() - 1)) -
            1);
  }

  LazyDeoptInfo* lazy_deopt_info() {
    DCHECK(properties().can_lazy_deopt());
    DCHECK(!properties().can_eager_deopt());
    return (reinterpret_cast<LazyDeoptInfo*>(input_address(input_count() - 1)) -
            1);
  }

  const LazyDeoptInfo* lazy_deopt_info() const {
    DCHECK(properties().can_lazy_deopt());
    DCHECK(!properties().can_eager_deopt());
    return (reinterpret_cast<const LazyDeoptInfo*>(
                input_address(input_count() - 1)) -
            1);
  }

 protected:
  explicit NodeBase(uint32_t bitfield) : bit_field_(bitfield) {}

  // Input i lives (i + 1) Input slots before `this` (reverse order).
  Input* input_address(int index) {
    DCHECK_LT(index, input_count());
    return reinterpret_cast<Input*>(this) - (index + 1);
  }

  const Input* input_address(int index) const {
    DCHECK_LT(index, input_count());
    return reinterpret_cast<const Input*>(this) - (index + 1);
  }

  // Placement-news an Input into the pre-node slot (the raw storage is
  // never default-constructed).
  void set_input(int index, ValueNode* input) {
    new (input_address(index)) Input(input);
  }

  void set_temporaries_needed(int value) {
#ifdef DEBUG
    DCHECK_EQ(kTemporariesState, kUnset);
    kTemporariesState = kNeedsTemporaries;
#endif  // DEBUG
    num_temporaries_needed_ = value;
  }

  EagerDeoptInfo* eager_deopt_info_address() {
    DCHECK(properties().can_eager_deopt());
    DCHECK(!properties().can_lazy_deopt());
    return reinterpret_cast<EagerDeoptInfo*>(input_address(input_count() - 1)) -
           1;
  }

  LazyDeoptInfo* lazy_deopt_info_address() {
    DCHECK(!properties().can_eager_deopt());
    DCHECK(properties().can_lazy_deopt());
    return reinterpret_cast<LazyDeoptInfo*>(input_address(input_count() - 1)) -
           1;
  }

 private:
  // Makes one zone allocation of [deopt info][inputs][Derived] and
  // constructs the Derived object at the end of it. Inputs (and deopt info)
  // are constructed later via set_input / placement new.
  template <class Derived, typename... Args>
  static Derived* Allocate(Zone* zone, size_t input_count, Args&&... args) {
    static_assert(
        !Derived::kProperties.can_eager_deopt() ||
            !Derived::kProperties.can_lazy_deopt(),
        "The current deopt info representation, at the end of inputs, requires "
        "that we cannot have both lazy and eager deopts on a node. If we ever "
        "need this, we have to update accessors to check node->properties() "
        "for which deopts are active.");
    const size_t size_before_node =
        input_count * sizeof(Input) +
        (Derived::kProperties.can_eager_deopt() ? sizeof(EagerDeoptInfo) : 0) +
        (Derived::kProperties.can_lazy_deopt() ? sizeof(LazyDeoptInfo) : 0);
    const size_t size = size_before_node + sizeof(Derived);
    intptr_t raw_buffer =
        reinterpret_cast<intptr_t>(zone->Allocate<NodeWithInlineInputs>(size));
    void* node_buffer = reinterpret_cast<void*>(raw_buffer + size_before_node);
    uint32_t bitfield = OpcodeField::encode(opcode_of<Derived>) |
                        OpPropertiesField::encode(Derived::kProperties) |
                        InputCountField::encode(input_count);
    Derived* node =
        new (node_buffer) Derived(bitfield, std::forward<Args>(args)...);
    return node;
  }

  uint32_t bit_field_;

 private:
  NodeIdT id_ = kInvalidNodeId;

  // Before register allocation this holds the requested temporary count;
  // afterwards, the assigned registers (see assign_temporaries).
  union {
    int num_temporaries_needed_ = 0;
    RegList temporaries_;
  };
#ifdef DEBUG
  // Debug-only tracker for which member of the union above is active.
  enum {
    kUnset,
    kNeedsTemporaries,
    kHasTemporaries
  } kTemporariesState = kUnset;
#endif  // DEBUG

  NodeBase() = delete;
  NodeBase(const NodeBase&) = delete;
  NodeBase(NodeBase&&) = delete;
  NodeBase& operator=(const NodeBase&) = delete;
  NodeBase& operator=(NodeBase&&) = delete;
};
639
// Generic case: an exact opcode match against the concrete class T.
template <class T>
constexpr bool NodeBase::Is() const {
  return opcode() == opcode_of<T>;
}

// Specialized sub-hierarchy type checks. The abstract hierarchy types have
// no single opcode, so these test the contiguous opcode range instead.
template <>
constexpr bool NodeBase::Is<ValueNode>() const {
  return IsValueNode(opcode());
}
template <>
constexpr bool NodeBase::Is<ConditionalControlNode>() const {
  return IsConditionalControlNode(opcode());
}
template <>
constexpr bool NodeBase::Is<UnconditionalControlNode>() const {
  return IsUnconditionalControlNode(opcode());
}
658
// The Node class hierarchy contains all non-control nodes.
class Node : public NodeBase {
 public:
  using List = base::ThreadedList<Node>;

  // Accessor for this node's result location; only valid when
  // Is<ValueNode>(). Defined after ValueNode below.
  inline ValueLocation& result();

  // This might break ThreadedList invariants.
  // Run ThreadedList::RevalidateTail afterwards.
  void AddNodeAfter(Node* node) {
    DCHECK_NOT_NULL(node);
    DCHECK_NULL(node->next_);
    node->next_ = next_;
    next_ = node;
  }

  Node* NextNode() const { return next_; }

 protected:
  using NodeBase::NodeBase;

 private:
  // Intrusive link for base::ThreadedList.
  Node** next() { return &next_; }
  Node* next_ = nullptr;

  friend List;
  friend base::ThreadedListTraits<Node>;
};
687
// All non-control nodes with a result.
//
// A ValueNode tracks its uses (as a linked list threaded through the
// InputLocations of its users), its live range, and — during register
// allocation — where its result currently lives (registers and/or a spill
// slot). The union below reuses the use-list tail pointer's storage for the
// spill/hint operand once use collection is finished; the DEBUG-only state_
// guards that transition.
class ValueNode : public Node {
 public:
  ValueLocation& result() { return result_; }
  const ValueLocation& result() const { return result_; }

  // Allocation hint: either invalid (no hint) or an unallocated operand.
  // Only valid after leaving the use-collection state.
  const compiler::InstructionOperand& hint() const {
    DCHECK_EQ(state_, kSpillOrHint);
    DCHECK(spill_or_hint_.IsInvalid() || spill_or_hint_.IsUnallocated());
    return spill_or_hint_;
  }

  // Ends use collection with neither a spill slot nor a hint.
  void SetNoSpillOrHint() {
    DCHECK_EQ(state_, kLastUse);
#ifdef DEBUG
    state_ = kSpillOrHint;
#endif  // DEBUG
    spill_or_hint_ = compiler::InstructionOperand();
  }

  bool is_spilled() const {
    DCHECK_EQ(state_, kSpillOrHint);
    return spill_or_hint_.IsStackSlot();
  }

  // Assigns a stack slot for the result (may transition out of use
  // collection; may not re-spill an already spilled node).
  void Spill(compiler::AllocatedOperand operand) {
#ifdef DEBUG
    if (state_ == kLastUse) {
      state_ = kSpillOrHint;
    } else {
      DCHECK(!is_spilled());
    }
#endif  // DEBUG
    DCHECK(operand.IsAnyStackSlot());
    spill_or_hint_ = operand;
  }

  compiler::AllocatedOperand spill_slot() const {
    DCHECK_EQ(state_, kSpillOrHint);
    DCHECK(is_spilled());
    return compiler::AllocatedOperand::cast(spill_or_hint_);
  }

  // Records a use at node `id` (uses must be marked in ascending id order):
  // extends the live range's end, writes `id` into the previous last use's
  // next_use_id slot, and makes `input_location` the new list tail.
  void mark_use(NodeIdT id, InputLocation* input_location) {
    DCHECK_EQ(state_, kLastUse);
    DCHECK_NE(id, kInvalidNodeId);
    DCHECK_LT(start_id(), id);
    DCHECK_IMPLIES(has_valid_live_range(), id >= end_id_);
    end_id_ = id;
    *last_uses_next_use_id_ = id;
    last_uses_next_use_id_ = input_location->get_next_use_id_address();
  }

  // [start, end] in node ids; start is this node's own id.
  struct LiveRange {
    NodeIdT start = kInvalidNodeId;
    NodeIdT end = kInvalidNodeId;
  };

  bool has_valid_live_range() const { return end_id_ != 0; }
  LiveRange live_range() const { return {start_id(), end_id_}; }
  NodeIdT next_use() const { return next_use_; }

  // The following methods should only be used during register allocation,
  // to mark the _current_ state of this Node according to the register
  // allocator.
  void set_next_use(NodeIdT use) { next_use_ = use; }

  // A node is dead once it has no more upcoming uses.
  bool is_dead() const { return next_use_ == kInvalidNodeId; }

  // Set of registers currently holding the result, maintained by the
  // register allocator.
  void AddRegister(Register reg) { registers_with_result_.set(reg); }
  void RemoveRegister(Register reg) { registers_with_result_.clear(reg); }
  RegList ClearRegisters() {
    return std::exchange(registers_with_result_, kEmptyRegList);
  }

  int num_registers() const { return registers_with_result_.Count(); }
  bool has_register() const { return registers_with_result_ != kEmptyRegList; }

  // Current location of the result: some register holding it if any,
  // otherwise the spill slot (which must then exist).
  compiler::AllocatedOperand allocation() const {
    if (has_register()) {
      return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
                                        MachineRepresentation::kTagged,
                                        registers_with_result_.first().code());
    }
    DCHECK(is_spilled());
    return compiler::AllocatedOperand::cast(spill_or_hint_);
  }

  bool is_untagged_value() const { return properties().is_untagged_value(); }

  ValueRepresentation value_representation() const {
    return is_untagged_value() ? ValueRepresentation::kUntagged
                               : ValueRepresentation::kTagged;
  }

 protected:
  explicit ValueNode(uint32_t bitfield)
      : Node(bitfield),
        last_uses_next_use_id_(&next_use_)
#ifdef DEBUG
        ,
        state_(kLastUse)
#endif  // DEBUG
  {
  }

  // Rename for better pairing with `end_id`.
  NodeIdT start_id() const { return id(); }

  NodeIdT end_id_ = kInvalidNodeId;
  NodeIdT next_use_ = kInvalidNodeId;
  ValueLocation result_;
  RegList registers_with_result_ = kEmptyRegList;
  union {
    // Pointer to the current last use's next_use_id field. Most of the time
    // this will be a pointer to an Input's next_use_id_ field, but it's
    // initialized to this node's next_use_ to track the first use.
    NodeIdT* last_uses_next_use_id_;
    // After use collection, the same storage holds the spill slot or
    // allocation hint (see state_).
    compiler::InstructionOperand spill_or_hint_;
  };
#ifdef DEBUG
  // TODO(leszeks): Consider spilling into kSpill and kHint.
  enum { kLastUse, kSpillOrHint } state_;
#endif  // DEBUG
};
813
result()814 ValueLocation& Node::result() {
815 DCHECK(Is<ValueNode>());
816 return Cast<ValueNode>()->result();
817 }
818
// CRTP mixin for a non-value node class of statically known type: shadows
// opcode()/properties() with compile-time-constant versions.
template <class Derived>
class NodeT : public Node {
  STATIC_ASSERT(!IsValueNode(opcode_of<Derived>));

 public:
  constexpr Opcode opcode() const { return opcode_of<Derived>; }
  const OpProperties& properties() const { return Derived::kProperties; }

 protected:
  explicit NodeT(uint32_t bitfield) : Node(bitfield) {
    DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
  }
};
832
// CRTP mixin for a non-value node with a statically known input count.
template <size_t InputCount, class Derived>
class FixedInputNodeT : public NodeT<Derived> {
  static constexpr size_t kInputCount = InputCount;

 public:
  // Shadowing for static knowledge.
  constexpr bool has_inputs() const { return input_count() > 0; }
  constexpr uint16_t input_count() const { return kInputCount; }
  auto end() {
    return std::make_reverse_iterator(this->input_address(input_count() - 1));
  }

 protected:
  explicit FixedInputNodeT(uint32_t bitfield) : NodeT<Derived>(bitfield) {
    DCHECK_EQ(NodeBase::input_count(), kInputCount);
  }
};
850
// CRTP mixin for a value node class of statically known type; counterpart
// of NodeT for the ValueNode hierarchy.
template <class Derived>
class ValueNodeT : public ValueNode {
  STATIC_ASSERT(IsValueNode(opcode_of<Derived>));

 public:
  constexpr Opcode opcode() const { return opcode_of<Derived>; }
  const OpProperties& properties() const { return Derived::kProperties; }

 protected:
  explicit ValueNodeT(uint32_t bitfield) : ValueNode(bitfield) {
    DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
  }
};
864
// CRTP mixin for a value node with a statically known input count.
template <size_t InputCount, class Derived>
class FixedInputValueNodeT : public ValueNodeT<Derived> {
  static constexpr size_t kInputCount = InputCount;

 public:
  // Shadowing for static knowledge.
  constexpr bool has_inputs() const { return input_count() > 0; }
  constexpr uint16_t input_count() const { return kInputCount; }
  auto end() {
    return std::make_reverse_iterator(this->input_address(input_count() - 1));
  }

 protected:
  explicit FixedInputValueNodeT(uint32_t bitfield)
      : ValueNodeT<Derived>(bitfield) {
    DCHECK_EQ(NodeBase::input_count(), kInputCount);
  }
};
883
// Base for the Generic* unary operation nodes: one operand plus a feedback
// slot for the feedback-collecting runtime call.
template <class Derived, Operation kOperation>
class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> {
  using Base = FixedInputValueNodeT<1, Derived>;

 public:
  // The implementation currently calls runtime.
  static constexpr OpProperties kProperties = OpProperties::JSCall();

  static constexpr int kOperandIndex = 0;
  Input& operand_input() { return Node::input(kOperandIndex); }
  compiler::FeedbackSource feedback() const { return feedback_; }

 protected:
  explicit UnaryWithFeedbackNode(uint32_t bitfield,
                                 const compiler::FeedbackSource& feedback)
      : Base(bitfield), feedback_(feedback) {}

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}

  const compiler::FeedbackSource feedback_;
};
907
// Base for the Generic* binary operation nodes: left/right operands plus a
// feedback slot for the feedback-collecting runtime call.
template <class Derived, Operation kOperation>
class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
  using Base = FixedInputValueNodeT<2, Derived>;

 public:
  // The implementation currently calls runtime.
  static constexpr OpProperties kProperties = OpProperties::JSCall();

  static constexpr int kLeftIndex = 0;
  static constexpr int kRightIndex = 1;
  Input& left_input() { return Node::input(kLeftIndex); }
  Input& right_input() { return Node::input(kRightIndex); }
  compiler::FeedbackSource feedback() const { return feedback_; }

 protected:
  BinaryWithFeedbackNode(uint32_t bitfield,
                         const compiler::FeedbackSource& feedback)
      : Base(bitfield), feedback_(feedback) {}

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}

  const compiler::FeedbackSource feedback_;
};
933
// Expands to a concrete operation node class `Name` deriving from
// `Super<Name, Operation::kOpName>` (one of the *WithFeedbackNode CRTP bases
// above), declaring the standard vreg-allocation/codegen interface.
#define DEF_OPERATION_NODE(Name, Super, OpName)                           \
  class Name : public Super<Name, Operation::k##OpName> {                 \
    using Base = Super<Name, Operation::k##OpName>;                       \
                                                                          \
   public:                                                                \
    Name(uint32_t bitfield, const compiler::FeedbackSource& feedback)     \
        : Base(bitfield, feedback) {}                                     \
    void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&); \
    void GenerateCode(MaglevCodeGenState*, const ProcessingState&);       \
    void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}        \
  };
945
// Stamp out one Generic##Name node class per operation in the unary,
// arithmetic, and comparison operation lists.
#define DEF_UNARY_WITH_FEEDBACK_NODE(Name) \
  DEF_OPERATION_NODE(Generic##Name, UnaryWithFeedbackNode, Name)
#define DEF_BINARY_WITH_FEEDBACK_NODE(Name) \
  DEF_OPERATION_NODE(Generic##Name, BinaryWithFeedbackNode, Name)
UNARY_OPERATION_LIST(DEF_UNARY_WITH_FEEDBACK_NODE)
ARITHMETIC_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
COMPARISON_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE)
#undef DEF_UNARY_WITH_FEEDBACK_NODE
#undef DEF_BINARY_WITH_FEEDBACK_NODE
955
// Converts an untagged value into a tagged Smi. "Checked" plus the
// EagerDeopt property indicate it deopts when the conversion fails
// (presumably on Smi overflow — behavior lives in GenerateCode elsewhere).
class CheckedSmiTag : public FixedInputValueNodeT<1, CheckedSmiTag> {
  using Base = FixedInputValueNodeT<1, CheckedSmiTag>;

 public:
  explicit CheckedSmiTag(uint32_t bitfield) : Base(bitfield) {}

  static constexpr OpProperties kProperties = OpProperties::EagerDeopt();

  // The untagged value to tag.
  Input& input() { return Node::input(0); }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
970
// Converts a tagged value into an untagged one (UntaggedValue property).
// "Checked" plus the EagerDeopt property indicate it deopts when the input
// is not actually a Smi (behavior lives in GenerateCode elsewhere).
class CheckedSmiUntag : public FixedInputValueNodeT<1, CheckedSmiUntag> {
  using Base = FixedInputValueNodeT<1, CheckedSmiUntag>;

 public:
  explicit CheckedSmiUntag(uint32_t bitfield) : Base(bitfield) {}

  static constexpr OpProperties kProperties =
      OpProperties::EagerDeopt() | OpProperties::UntaggedValue();

  // The tagged value to untag.
  Input& input() { return Node::input(0); }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
986
// An untagged int32 constant value node.
class Int32Constant : public FixedInputValueNodeT<0, Int32Constant> {
  using Base = FixedInputValueNodeT<0, Int32Constant>;

 public:
  explicit Int32Constant(uint32_t bitfield, int32_t value)
      : Base(bitfield), value_(value) {}

  static constexpr OpProperties kProperties = OpProperties::UntaggedValue();

  // The constant's value.
  int32_t value() const { return value_; }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;

 private:
  const int32_t value_;
};
1005
// Untagged int32 addition. The EagerDeopt property indicates it deopts on
// overflow (behavior lives in GenerateCode elsewhere).
class Int32AddWithOverflow
    : public FixedInputValueNodeT<2, Int32AddWithOverflow> {
  using Base = FixedInputValueNodeT<2, Int32AddWithOverflow>;

 public:
  explicit Int32AddWithOverflow(uint32_t bitfield) : Base(bitfield) {}

  static constexpr OpProperties kProperties =
      OpProperties::EagerDeopt() | OpProperties::UntaggedValue();

  // The two int32 addends.
  static constexpr int kLeftIndex = 0;
  static constexpr int kRightIndex = 1;
  Input& left_input() { return Node::input(kLeftIndex); }
  Input& right_input() { return Node::input(kRightIndex); }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
1025
// The value of an interpreter register at function entry (e.g. an argument
// or `this`), identified by its source register.
class InitialValue : public FixedInputValueNodeT<0, InitialValue> {
  using Base = FixedInputValueNodeT<0, InitialValue>;

 public:
  explicit InitialValue(uint32_t bitfield, interpreter::Register source)
      : Base(bitfield), source_(source) {}

  // The interpreter register this value comes from.
  interpreter::Register source() const { return source_; }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;

 private:
  const interpreter::Register source_;
};
1042
// A value pinned to a specific machine register at function entry.
class RegisterInput : public FixedInputValueNodeT<0, RegisterInput> {
  using Base = FixedInputValueNodeT<0, RegisterInput>;

 public:
  explicit RegisterInput(uint32_t bitfield, Register input)
      : Base(bitfield), input_(input) {}

  // The machine register holding this value.
  Register input() const { return input_; }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;

 private:
  const Register input_;
};
1059
// A tagged Smi constant value node.
class SmiConstant : public FixedInputValueNodeT<0, SmiConstant> {
  using Base = FixedInputValueNodeT<0, SmiConstant>;

 public:
  explicit SmiConstant(uint32_t bitfield, Smi value)
      : Base(bitfield), value_(value) {}

  // The constant's Smi value.
  Smi value() const { return value_; }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;

 private:
  const Smi value_;
};
1076
// A heap-object constant value node, referenced via a broker HeapObjectRef.
class Constant : public FixedInputValueNodeT<0, Constant> {
  using Base = FixedInputValueNodeT<0, Constant>;

 public:
  explicit Constant(uint32_t bitfield, const compiler::HeapObjectRef& object)
      : Base(bitfield), object_(object) {}

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;

 private:
  const compiler::HeapObjectRef object_;
};
1091
// A constant value node referring to an entry of the roots table.
class RootConstant : public FixedInputValueNodeT<0, RootConstant> {
  using Base = FixedInputValueNodeT<0, RootConstant>;

 public:
  explicit RootConstant(uint32_t bitfield, RootIndex index)
      : Base(bitfield), index_(index) {}

  // Which root this constant refers to.
  RootIndex index() const { return index_; }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;

 private:
  const RootIndex index_;
};
1108
// Checks that the input object's map equals `map`; the EagerDeopt property
// indicates a deopt on mismatch (behavior lives in GenerateCode elsewhere).
class CheckMaps : public FixedInputNodeT<1, CheckMaps> {
  using Base = FixedInputNodeT<1, CheckMaps>;

 public:
  explicit CheckMaps(uint32_t bitfield, const compiler::MapRef& map)
      : Base(bitfield), map_(map) {}

  // TODO(verwaest): This just calls in deferred code, so probably we'll need to
  // mark that to generate stack maps. Mark as call so we at least clear the
  // registers since we currently don't properly spill either.
  static constexpr OpProperties kProperties =
      OpProperties::EagerDeopt() | OpProperties::Call();

  // The expected map.
  compiler::MapRef map() const { return map_; }

  // The object whose map is checked.
  static constexpr int kActualMapIndex = 0;
  Input& actual_map_input() { return input(kActualMapIndex); }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;

 private:
  const compiler::MapRef map_;
};
1134
// Loads a field from an object, driven by an integer IC handler word
// (handler semantics are defined by the codegen, not visible here).
class LoadField : public FixedInputValueNodeT<1, LoadField> {
  using Base = FixedInputValueNodeT<1, LoadField>;

 public:
  explicit LoadField(uint32_t bitfield, int handler)
      : Base(bitfield), handler_(handler) {}

  static constexpr OpProperties kProperties = OpProperties::Reading();

  // Encoded load handler describing the field to load.
  int handler() const { return handler_; }

  // The object to load from.
  static constexpr int kObjectIndex = 0;
  Input& object_input() { return input(kObjectIndex); }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;

 private:
  const int handler_;
};
1156
// Stores a value into an object's field, driven by an integer IC handler
// word (handler semantics are defined by the codegen, not visible here).
class StoreField : public FixedInputNodeT<2, StoreField> {
  using Base = FixedInputNodeT<2, StoreField>;

 public:
  explicit StoreField(uint32_t bitfield, int handler)
      : Base(bitfield), handler_(handler) {}

  static constexpr OpProperties kProperties = OpProperties::Writing();

  // Encoded store handler describing the field to store to.
  int handler() const { return handler_; }

  // The object stored into, and the value stored.
  static constexpr int kObjectIndex = 0;
  static constexpr int kValueIndex = 1;
  Input& object_input() { return input(kObjectIndex); }
  Input& value_input() { return input(kValueIndex); }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;

 private:
  const int handler_;
};
1180
// Loads the global with the given name, taking the context as input.
class LoadGlobal : public FixedInputValueNodeT<1, LoadGlobal> {
  using Base = FixedInputValueNodeT<1, LoadGlobal>;

 public:
  explicit LoadGlobal(uint32_t bitfield, const compiler::NameRef& name)
      : Base(bitfield), name_(name) {}

  // The implementation currently calls runtime.
  static constexpr OpProperties kProperties = OpProperties::JSCall();

  // Context in which the global is looked up.
  Input& context() { return input(0); }
  // Name of the global being loaded.
  const compiler::NameRef& name() const { return name_; }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;

 private:
  const compiler::NameRef name_;
};
1201
// Generic named property load (object.name) with feedback, taking context
// and object as inputs.
class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
  using Base = FixedInputValueNodeT<2, LoadNamedGeneric>;

 public:
  explicit LoadNamedGeneric(uint32_t bitfield, const compiler::NameRef& name,
                            const compiler::FeedbackSource& feedback)
      : Base(bitfield), name_(name), feedback_(feedback) {}

  // The implementation currently calls runtime.
  static constexpr OpProperties kProperties = OpProperties::JSCall();

  // Property name being loaded, and the feedback slot for the load.
  compiler::NameRef name() const { return name_; }
  compiler::FeedbackSource feedback() const { return feedback_; }

  // Context of the load, and the object loaded from.
  static constexpr int kContextIndex = 0;
  static constexpr int kObjectIndex = 1;
  Input& context() { return input(kContextIndex); }
  Input& object_input() { return input(kObjectIndex); }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;

 private:
  const compiler::NameRef name_;
  const compiler::FeedbackSource feedback_;
};
1229
// A register-allocator-inserted move from `source` to `target` operand
// (resolves parallel moves between allocated locations).
class GapMove : public FixedInputNodeT<0, GapMove> {
  using Base = FixedInputNodeT<0, GapMove>;

 public:
  GapMove(uint32_t bitfield, compiler::AllocatedOperand source,
          compiler::AllocatedOperand target)
      : Base(bitfield), source_(source), target_(target) {}

  // Source and destination locations of the move.
  compiler::AllocatedOperand source() const { return source_; }
  compiler::AllocatedOperand target() const { return target_; }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;

 private:
  compiler::AllocatedOperand source_;
  compiler::AllocatedOperand target_;
};
1249
// TODO(verwaest): It may make more sense to buffer phis in merged_states until
// we set up the interpreter frame state for code generation. At that point we
// can generate correctly-sized phis.
//
// A phi value node merging the values of an interpreter register across
// control-flow predecessors. Phis of a block are chained in an intrusive
// base::ThreadedList.
class Phi : public ValueNodeT<Phi> {
  using Base = ValueNodeT<Phi>;

 public:
  using List = base::ThreadedList<Phi>;

  // TODO(jgruber): More intuitive constructors, if possible.
  Phi(uint32_t bitfield, interpreter::Register owner, int merge_offset)
      : Base(bitfield), owner_(owner), merge_offset_(merge_offset) {}

  // Interpreter register whose value this phi merges.
  interpreter::Register owner() const { return owner_; }
  // Offset of the merge point this phi belongs to (presumably a bytecode
  // offset — TODO confirm against the graph builder).
  int merge_offset() const { return merge_offset_; }

  // Phi inputs are set incrementally as predecessors are processed.
  using Node::set_input;

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void AllocateVregInPostProcess(MaglevVregAllocationState*);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;

 private:
  // Intrusive link used by base::ThreadedList<Phi>.
  Phi** next() { return &next_; }

  const interpreter::Register owner_;
  Phi* next_ = nullptr;
  const int merge_offset_;
  friend List;
  friend base::ThreadedListTraits<Phi>;
};
1282
// A JS function call with a variable number of arguments. The function and
// context are fixed inputs; arguments follow them.
class Call : public ValueNodeT<Call> {
  using Base = ValueNodeT<Call>;

 public:
  // We assume function and context as fixed inputs.
  static constexpr int kFunctionIndex = 0;
  static constexpr int kContextIndex = 1;
  static constexpr int kFixedInputCount = 2;

  // This ctor is used for variable input counts.
  // Inputs must be initialized manually.
  Call(uint32_t bitfield, ConvertReceiverMode mode, ValueNode* function,
       ValueNode* context)
      : Base(bitfield), receiver_mode_(mode) {
    set_input(kFunctionIndex, function);
    set_input(kContextIndex, context);
  }

  static constexpr OpProperties kProperties = OpProperties::JSCall();

  // Fixed inputs: callee and context.
  Input& function() { return input(kFunctionIndex); }
  const Input& function() const { return input(kFunctionIndex); }
  Input& context() { return input(kContextIndex); }
  const Input& context() const { return input(kContextIndex); }
  // Arguments occupy the inputs after the fixed ones.
  int num_args() const { return input_count() - kFixedInputCount; }
  Input& arg(int i) { return input(i + kFixedInputCount); }
  void set_arg(int i, ValueNode* node) {
    set_input(i + kFixedInputCount, node);
  }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}

 private:
  ConvertReceiverMode receiver_mode_;
};
1320
// Represents either a direct BasicBlock pointer, or an entry in a list of
// unresolved BasicBlockRefs which will be mutated (in place) at some point into
// direct BasicBlock pointers.
//
// The two modes share storage via a union; in debug builds `state_` tracks
// which union member is active and every accessor asserts the mode.
class BasicBlockRef {
  struct BasicBlockRefBuilder;

 public:
  // Default-construct in ref-list mode with an empty list.
  BasicBlockRef() : next_ref_(nullptr) {
#ifdef DEBUG
    state_ = kRefList;
#endif
  }
  // Construct in resolved mode, pointing directly at `block`.
  explicit BasicBlockRef(BasicBlock* block) : block_ptr_(block) {
#ifdef DEBUG
    state_ = kBlockPointer;
#endif
  }

  // Refs can't be copied or moved, since they are referenced by `this` pointer
  // in the ref list.
  BasicBlockRef(const BasicBlockRef&) = delete;
  BasicBlockRef(BasicBlockRef&&) = delete;
  BasicBlockRef& operator=(const BasicBlockRef&) = delete;
  BasicBlockRef& operator=(BasicBlockRef&&) = delete;

  // Construct a new ref-list mode BasicBlockRef and add it to the given ref
  // list.
  explicit BasicBlockRef(BasicBlockRef* ref_list_head) : BasicBlockRef() {
    BasicBlockRef* old_next_ptr = MoveToRefList(ref_list_head);
    USE(old_next_ptr);
    DCHECK_NULL(old_next_ptr);
  }

  // Change this ref to a direct basic block pointer, returning the old "next"
  // pointer of the current ref.
  BasicBlockRef* SetToBlockAndReturnNext(BasicBlock* block) {
    DCHECK_EQ(state_, kRefList);

    BasicBlockRef* old_next_ptr = next_ref_;
    block_ptr_ = block;
#ifdef DEBUG
    state_ = kBlockPointer;
#endif
    return old_next_ptr;
  }

  // Reset this ref list to null, returning the old ref list (i.e. the old
  // "next" pointer).
  BasicBlockRef* Reset() {
    DCHECK_EQ(state_, kRefList);

    BasicBlockRef* old_next_ptr = next_ref_;
    next_ref_ = nullptr;
    return old_next_ptr;
  }

  // Move this ref to the given ref list, returning the old "next" pointer of
  // the current ref.
  BasicBlockRef* MoveToRefList(BasicBlockRef* ref_list_head) {
    DCHECK_EQ(state_, kRefList);
    DCHECK_EQ(ref_list_head->state_, kRefList);

    BasicBlockRef* old_next_ptr = next_ref_;
    next_ref_ = ref_list_head->next_ref_;
    ref_list_head->next_ref_ = this;
    return old_next_ptr;
  }

  // The resolved block; only valid in block-pointer mode.
  BasicBlock* block_ptr() const {
    DCHECK_EQ(state_, kBlockPointer);
    return block_ptr_;
  }

  // The next unresolved ref; only valid in ref-list mode.
  BasicBlockRef* next_ref() const {
    DCHECK_EQ(state_, kRefList);
    return next_ref_;
  }

  // Whether the ref list is non-empty; only valid in ref-list mode.
  bool has_ref() const {
    DCHECK_EQ(state_, kRefList);
    return next_ref_ != nullptr;
  }

 private:
  // Active member depends on the mode (tracked by state_ in debug builds).
  union {
    BasicBlock* block_ptr_;
    BasicBlockRef* next_ref_;
  };
#ifdef DEBUG
  enum { kBlockPointer, kRefList } state_;
#endif  // DEBUG
};
1413
// Base class for nodes that terminate a basic block.
class ControlNode : public NodeBase {
 public:
  // A "hole" in control flow is a control node that unconditionally interrupts
  // linear control flow (either by jumping or by exiting).
  //
  // A "post-dominating" hole is a hole that is guaranteed to be reached in
  // control flow after this node (i.e. it is a hole that is a post-dominator
  // of this node).
  ControlNode* next_post_dominating_hole() const {
    return next_post_dominating_hole_;
  }
  void set_next_post_dominating_hole(ControlNode* node) {
    // Only unconditional interruptions of linear control flow qualify.
    DCHECK_IMPLIES(node != nullptr, node->Is<Jump>() || node->Is<Return>() ||
                                        node->Is<Deopt>() ||
                                        node->Is<JumpLoop>());
    next_post_dominating_hole_ = node;
  }

 protected:
  using NodeBase::NodeBase;

 private:
  ControlNode* next_post_dominating_hole_ = nullptr;
};
1438
// Base class for control nodes with a single jump target.
class UnconditionalControlNode : public ControlNode {
 public:
  // The (resolved) jump target.
  BasicBlock* target() const { return target_.block_ptr(); }
  // Index of this edge among the target's predecessors.
  int predecessor_id() const { return predecessor_id_; }
  void set_predecessor_id(int id) { predecessor_id_ = id; }

 protected:
  // Target given either as an unresolved ref list or a concrete block.
  explicit UnconditionalControlNode(uint32_t bitfield,
                                    BasicBlockRef* target_refs)
      : ControlNode(bitfield), target_(target_refs) {}
  explicit UnconditionalControlNode(uint32_t bitfield, BasicBlock* target)
      : ControlNode(bitfield), target_(target) {}

 private:
  const BasicBlockRef target_;
  int predecessor_id_ = 0;
};
1456
// CRTP helper fixing the input count of an unconditional control node to
// zero and shadowing opcode()/input_count() with static knowledge.
template <class Derived>
class UnconditionalControlNodeT : public UnconditionalControlNode {
  STATIC_ASSERT(IsUnconditionalControlNode(opcode_of<Derived>));
  static constexpr size_t kInputCount = 0;

 public:
  // Shadowing for static knowledge.
  constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
  constexpr bool has_inputs() const { return input_count() > 0; }
  constexpr uint16_t input_count() const { return kInputCount; }
  auto end() {
    return std::make_reverse_iterator(input_address(input_count() - 1));
  }

 protected:
  // The bitfield must encode Derived's opcode and zero inputs.
  explicit UnconditionalControlNodeT(uint32_t bitfield,
                                     BasicBlockRef* target_refs)
      : UnconditionalControlNode(bitfield, target_refs) {
    DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
    DCHECK_EQ(NodeBase::input_count(), kInputCount);
  }
  explicit UnconditionalControlNodeT(uint32_t bitfield, BasicBlock* target)
      : UnconditionalControlNode(bitfield, target) {
    DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
    DCHECK_EQ(NodeBase::input_count(), kInputCount);
  }
};
1484
// Base class for control nodes branching to one of two targets.
class ConditionalControlNode : public ControlNode {
 public:
  ConditionalControlNode(uint32_t bitfield, BasicBlockRef* if_true_refs,
                         BasicBlockRef* if_false_refs)
      : ControlNode(bitfield),
        if_true_(if_true_refs),
        if_false_(if_false_refs) {}

  // The (resolved) true and false branch targets.
  BasicBlock* if_true() const { return if_true_.block_ptr(); }
  BasicBlock* if_false() const { return if_false_.block_ptr(); }

 private:
  BasicBlockRef if_true_;
  BasicBlockRef if_false_;
};
1500
// CRTP helper fixing the input count of a conditional control node and
// shadowing opcode()/input_count() with static knowledge.
template <size_t InputCount, class Derived>
class ConditionalControlNodeT : public ConditionalControlNode {
  STATIC_ASSERT(IsConditionalControlNode(opcode_of<Derived>));
  static constexpr size_t kInputCount = InputCount;

 public:
  // Shadowing for static knowledge.
  constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; }
  constexpr bool has_inputs() const { return input_count() > 0; }
  constexpr uint16_t input_count() const { return kInputCount; }
  auto end() {
    return std::make_reverse_iterator(input_address(input_count() - 1));
  }

 protected:
  // The bitfield must encode Derived's opcode and exactly kInputCount inputs.
  explicit ConditionalControlNodeT(uint32_t bitfield,
                                   BasicBlockRef* if_true_refs,
                                   BasicBlockRef* if_false_refs)
      : ConditionalControlNode(bitfield, if_true_refs, if_false_refs) {
    DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
    DCHECK_EQ(NodeBase::input_count(), kInputCount);
  }
};
1524
// Unconditional forward jump to the target block.
class Jump : public UnconditionalControlNodeT<Jump> {
  using Base = UnconditionalControlNodeT<Jump>;

 public:
  explicit Jump(uint32_t bitfield, BasicBlockRef* target_refs)
      : Base(bitfield, target_refs) {}

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
1536
// Unconditional back-edge jump to a loop header.
class JumpLoop : public UnconditionalControlNodeT<JumpLoop> {
  using Base = UnconditionalControlNodeT<JumpLoop>;

 public:
  // Target given either as a concrete block or an unresolved ref list.
  explicit JumpLoop(uint32_t bitfield, BasicBlock* target)
      : Base(bitfield, target) {}

  explicit JumpLoop(uint32_t bitfield, BasicBlockRef* ref)
      : Base(bitfield, ref) {}

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
1551
// Returns its single input value from the function.
class Return : public ControlNode {
 public:
  explicit Return(uint32_t bitfield) : ControlNode(bitfield) {
    // The bitfield must encode the Return opcode.
    DCHECK_EQ(NodeBase::opcode(), opcode_of<Return>);
  }

  // The value being returned.
  Input& value_input() { return input(0); }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
1564
// Unconditionally deoptimizes (EagerDeopt property); terminates the block.
class Deopt : public ControlNode {
 public:
  explicit Deopt(uint32_t bitfield) : ControlNode(bitfield) {
    // The bitfield must encode the Deopt opcode.
    DCHECK_EQ(NodeBase::opcode(), opcode_of<Deopt>);
  }

  static constexpr OpProperties kProperties = OpProperties::EagerDeopt();

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
1577
// Branches on whether the condition input is the true value (no boolean
// conversion; compare BranchIfToBooleanTrue).
class BranchIfTrue : public ConditionalControlNodeT<1, BranchIfTrue> {
  using Base = ConditionalControlNodeT<1, BranchIfTrue>;

 public:
  explicit BranchIfTrue(uint32_t bitfield, BasicBlockRef* if_true_refs,
                        BasicBlockRef* if_false_refs)
      : Base(bitfield, if_true_refs, if_false_refs) {}

  // The value branched on.
  Input& condition_input() { return input(0); }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
1592
// Branches on the ToBoolean conversion of the condition input. The Call
// property indicates the conversion may call out (e.g. a builtin).
class BranchIfToBooleanTrue
    : public ConditionalControlNodeT<1, BranchIfToBooleanTrue> {
  using Base = ConditionalControlNodeT<1, BranchIfToBooleanTrue>;

 public:
  explicit BranchIfToBooleanTrue(uint32_t bitfield, BasicBlockRef* if_true_refs,
                                 BasicBlockRef* if_false_refs)
      : Base(bitfield, if_true_refs, if_false_refs) {}

  static constexpr OpProperties kProperties = OpProperties::Call();

  // The value converted and branched on.
  Input& condition_input() { return input(0); }

  void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
1610
1611 class BranchIfCompare
1612 : public ConditionalControlNodeT<2, BranchIfToBooleanTrue> {
1613 using Base = ConditionalControlNodeT<2, BranchIfToBooleanTrue>;
1614
1615 public:
1616 static constexpr int kLeftIndex = 0;
1617 static constexpr int kRightIndex = 1;
left_input()1618 Input& left_input() { return NodeBase::input(kLeftIndex); }
right_input()1619 Input& right_input() { return NodeBase::input(kRightIndex); }
1620
BranchIfCompare(uint32_t bitfield,Operation operation,BasicBlockRef * if_true_refs,BasicBlockRef * if_false_refs)1621 explicit BranchIfCompare(uint32_t bitfield, Operation operation,
1622 BasicBlockRef* if_true_refs,
1623 BasicBlockRef* if_false_refs)
1624 : Base(bitfield, if_true_refs, if_false_refs), operation_(operation) {}
1625
1626 void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
1627 void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
PrintParams(std::ostream &,MaglevGraphLabeller *)1628 void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
1629
1630 private:
1631 Operation operation_;
1632 };
1633
1634 } // namespace maglev
1635 } // namespace internal
1636 } // namespace v8
1637
1638 #endif // V8_MAGLEV_MAGLEV_IR_H_
1639