// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_
#define V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_

#include <type_traits>

#include "src/base/optional.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/bytecode-liveness-map.h"
#include "src/compiler/heap-refs.h"
#include "src/compiler/js-heap-broker.h"
#include "src/interpreter/bytecode-register.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
#include "src/utils/memcopy.h"

namespace v8 {
namespace internal {
namespace maglev {

class MaglevGraphBuilder {
 public:
  explicit MaglevGraphBuilder(LocalIsolate* local_isolate,
                              MaglevCompilationUnit* compilation_unit);

  void Build() {
    for (iterator_.Reset(); !iterator_.done(); iterator_.Advance()) {
      VisitSingleBytecode();
      // TODO(v8:7700): Clean up after all bytecodes are supported.
      if (found_unsupported_bytecode()) break;
    }

    // When merging InterpreterFrameStates at merge points, we might emit
    // CheckedSmiTags and add them unsafely to the basic blocks. This addition
    // might break a list invariant (namely `tail_` might not point to the last
    // element). We revalidate this invariant here in all basic blocks.
    for (BasicBlock* block : *graph_) {
      block->nodes().RevalidateTail();
    }
  }

  Graph* graph() const { return graph_; }

  // TODO(v8:7700): Clean up after all bytecodes are supported.
  bool found_unsupported_bytecode() const {
    return found_unsupported_bytecode_;
  }

 private:
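  // Creates an empty edge-split block whose only control node is a Jump to the
  // block at `offset`; used to split edges out of conditional predecessors at
  // merge points.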
  BasicBlock* CreateEmptyBlock(int offset, BasicBlock* predecessor) {
    DCHECK_NULL(current_block_);
    current_block_ = zone()->New<BasicBlock>(nullptr);
    BasicBlock* result = CreateBlock<Jump>({}, &jump_targets_[offset]);
    result->set_empty_block_predecessor(predecessor);
    return result;
  }

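  // Makes the merge state at `offset` the current interpreter frame state and,
  // when there is more than one live predecessor, rewires conditional
  // predecessors through freshly created empty blocks (edge splitting).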
  void ProcessMergePoint(int offset) {
    // First copy the merge state to be the current state.
    MergePointInterpreterFrameState& merge_state = *merge_states_[offset];
    current_interpreter_frame_.CopyFrom(*compilation_unit_, merge_state);

    if (merge_state.predecessor_count() == 1) return;

    // Set up edge-split.
    int predecessor_index = merge_state.predecessor_count() - 1;
    BasicBlockRef* old_jump_targets = jump_targets_[offset].Reset();
    while (old_jump_targets != nullptr) {
      BasicBlock* predecessor = merge_state.predecessor_at(predecessor_index);
      if (predecessor == nullptr) {
        // We can have null predecessors if the predecessor is dead.
        predecessor_index--;
        continue;
      }
      ControlNode* control = predecessor->control_node();
      if (control->Is<ConditionalControlNode>()) {
        // CreateEmptyBlock automatically registers itself with the offset.
        predecessor = CreateEmptyBlock(offset, predecessor);
        // Set the old predecessor's (the conditional block) reference to
        // point to the new empty predecessor block.
        old_jump_targets =
            old_jump_targets->SetToBlockAndReturnNext(predecessor);
      } else {
        // Re-register the block in the offset's ref list.
        old_jump_targets =
            old_jump_targets->MoveToRefList(&jump_targets_[offset]);
      }
      predecessor->set_predecessor_id(predecessor_index--);
    }
#ifdef DEBUG
    if (bytecode_analysis().IsLoopHeader(offset)) {
      // For loops, the JumpLoop block hasn't been generated yet, and so isn't
      // in the list of jump targets. It's defined to be at index 0, so once
      // we've processed all the jump targets, the 0 index should be the one
      // remaining.
      DCHECK_EQ(predecessor_index, 0);
    } else {
      DCHECK_EQ(predecessor_index, -1);
    }
#endif
    if (has_graph_labeller()) {
      for (Phi* phi : *merge_states_[offset]->phis()) {
        graph_labeller()->RegisterNode(phi);
      }
    }
  }

  // Return true if the given offset is a merge point, i.e. there are jumps
  // targeting it.
  bool IsOffsetAMergePoint(int offset) {
    return merge_states_[offset] != nullptr;
  }

  // Called when a block is killed by an unconditional eager deopt.
  void EmitUnconditionalDeopt() {
    // Create a block rather than calling finish, since we don't yet know the
    // next block's offset before the loop skipping the rest of the bytecodes.
    BasicBlock* block = CreateBlock<Deopt>({});
    ResolveJumpsToBlockAtOffset(block, block_offset_);

    // Skip any bytecodes remaining in the block, up to the next merge point.
    while (!IsOffsetAMergePoint(iterator_.next_offset())) {
      iterator_.Advance();
      if (iterator_.done()) break;
    }

    // If there is control flow out of this block, we need to kill the merges
    // into the control flow targets.
    interpreter::Bytecode bytecode = iterator_.current_bytecode();
    if (interpreter::Bytecodes::IsForwardJump(bytecode)) {
      // Jumps merge into their target, and conditional jumps also merge into
      // the fallthrough.
      merge_states_[iterator_.GetJumpTargetOffset()]->MergeDead();
      if (interpreter::Bytecodes::IsConditionalJump(bytecode)) {
        merge_states_[iterator_.next_offset()]->MergeDead();
      }
    } else if (bytecode == interpreter::Bytecode::kJumpLoop) {
      // JumpLoop merges into its loop header, which has to be treated
      // specially by the merge.
      merge_states_[iterator_.GetJumpTargetOffset()]->MergeDeadLoop();
    } else if (interpreter::Bytecodes::IsSwitch(bytecode)) {
      // Switches merge into their targets, and into the fallthrough.
      for (auto offset : iterator_.GetJumpTableTargetOffsets()) {
        merge_states_[offset.target_offset]->MergeDead();
      }
      merge_states_[iterator_.next_offset()]->MergeDead();
    } else if (!interpreter::Bytecodes::Returns(bytecode) &&
               !interpreter::Bytecodes::UnconditionallyThrows(bytecode)) {
      // Any other bytecode that doesn't return or throw will merge into the
      // fallthrough.
      merge_states_[iterator_.next_offset()]->MergeDead();
    }
  }

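  // Visits the bytecode at the iterator's current offset. If the offset is a
  // merge point, the current block (if any) is finished with a Jump and its
  // state merged in, then a new block is started from the merge state. Finally
  // the bytecode is dispatched to its Visit* handler.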
  void VisitSingleBytecode() {
    int offset = iterator_.current_offset();
    if (V8_UNLIKELY(merge_states_[offset] != nullptr)) {
      if (current_block_ != nullptr) {
        // TODO(leszeks): Re-evaluate this DCHECK, we might hit it if the only
        // bytecodes in this basic block were only register juggling.
        // DCHECK(!current_block_->nodes().is_empty());
        FinishBlock<Jump>(offset, {}, &jump_targets_[offset]);

        merge_states_[offset]->Merge(*compilation_unit_,
                                     current_interpreter_frame_,
                                     graph()->last_block(), offset);
      }
      ProcessMergePoint(offset);
      StartNewBlock(offset);
    }
    DCHECK_NOT_NULL(current_block_);
#ifdef DEBUG
    // Clear new nodes for the next VisitFoo
    new_nodes_.clear();
#endif
    switch (iterator_.current_bytecode()) {
#define BYTECODE_CASE(name, ...)       \
  case interpreter::Bytecode::k##name: \
    Visit##name();                     \
    break;
      BYTECODE_LIST(BYTECODE_CASE)
#undef BYTECODE_CASE
    }
  }

#define BYTECODE_VISITOR(name, ...) void Visit##name();
  BYTECODE_LIST(BYTECODE_VISITOR)
#undef BYTECODE_VISITOR

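  // Node creation helpers. CreateNewNode() attaches an eager- or lazy-deopt
  // checkpoint when the node's static properties require one; AddNewNode()
  // additionally appends the node to the current block and registers it with
  // the graph labeller, e.g. (illustrative):
  //   ValueNode* tagged = AddNewNode<CheckedSmiTag>({untagged_value});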
  template <typename NodeT>
  NodeT* AddNode(NodeT* node) {
    if (node->properties().is_required_when_unused()) {
      MarkPossibleSideEffect();
    }
    current_block_->nodes().Add(node);
    if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
#ifdef DEBUG
    new_nodes_.insert(node);
#endif
    return node;
  }

  template <typename NodeT, typename... Args>
  NodeT* AddNewNode(size_t input_count, Args&&... args) {
    return AddNode(
        CreateNewNode<NodeT>(input_count, std::forward<Args>(args)...));
  }

  template <typename NodeT, typename... Args>
  NodeT* AddNewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
    return AddNode(CreateNewNode<NodeT>(inputs, std::forward<Args>(args)...));
  }

  template <typename NodeT, typename... Args>
  NodeT* CreateNewNode(Args&&... args) {
    if constexpr (NodeT::kProperties.can_eager_deopt()) {
      return NodeBase::New<NodeT>(zone(), *compilation_unit_,
                                  GetLatestCheckpointedState(),
                                  std::forward<Args>(args)...);
    } else if constexpr (NodeT::kProperties.can_lazy_deopt()) {
      return NodeBase::New<NodeT>(zone(), *compilation_unit_,
                                  GetCheckpointedStateForLazyDeopt(),
                                  std::forward<Args>(args)...);
    } else {
      return NodeBase::New<NodeT>(zone(), std::forward<Args>(args)...);
    }
  }

  ValueNode* GetContext() const {
    return current_interpreter_frame_.get(
        interpreter::Register::current_context());
  }

  FeedbackSlot GetSlotOperand(int operand_index) const {
    return iterator_.GetSlotOperand(operand_index);
  }

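  // Wraps the constant-pool entry referenced by the given operand as a
  // heap-broker ref of type T.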
  template <class T, typename = std::enable_if_t<
                         std::is_convertible<T*, Object*>::value>>
  typename compiler::ref_traits<T>::ref_type GetRefOperand(int operand_index) {
    // The BytecodeArray itself was fetched by using a barrier so all reads
    // from the constant pool are safe.
    return MakeRefAssumeMemoryFence(
        broker(), broker()->CanonicalPersistentHandle(
                      Handle<T>::cast(iterator_.GetConstantForIndexOperand(
                          operand_index, local_isolate()))));
  }

  ValueNode* GetConstant(const compiler::ObjectRef& ref) {
    if (ref.IsSmi()) {
      return AddNewNode<SmiConstant>({}, Smi::FromInt(ref.AsSmi()));
    }
    // TODO(leszeks): Detect roots and use RootConstant.
    return AddNewNode<Constant>({}, ref.AsHeapObject());
  }

  // Move an existing ValueNode between two registers. You can pass
  // virtual_accumulator as the src or dst to move in or out of the accumulator.
  void MoveNodeBetweenRegisters(interpreter::Register src,
                                interpreter::Register dst) {
    // We shouldn't be moving newly created nodes between registers.
    DCHECK_EQ(0, new_nodes_.count(current_interpreter_frame_.get(src)));
    DCHECK_NOT_NULL(current_interpreter_frame_.get(src));

    current_interpreter_frame_.set(dst, current_interpreter_frame_.get(src));
  }

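  // Tagging helpers: GetTaggedValue() returns a tagged view of the value in
  // `reg`, inserting a CheckedSmiTag (and caching it back into the register)
  // when the register currently holds an untagged value; GetSmiUntaggedValue()
  // is the inverse, inserting a CheckedSmiUntag where needed.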
  ValueNode* GetTaggedValue(interpreter::Register reg) {
    // TODO(victorgomes): Add the representation (Tagged/Untagged) in the
    // InterpreterFrameState, so that we don't need to dereference a node.
    ValueNode* value = current_interpreter_frame_.get(reg);
    if (!value->is_untagged_value()) return value;
    if (value->Is<CheckedSmiUntag>()) {
      return value->input(0).node();
    }
    DCHECK(value->Is<Int32AddWithOverflow>() || value->Is<Int32Constant>());
    ValueNode* tagged = AddNewNode<CheckedSmiTag>({value});
    current_interpreter_frame_.set(reg, tagged);
    return tagged;
  }

  ValueNode* GetSmiUntaggedValue(interpreter::Register reg) {
    // TODO(victorgomes): Add the representation (Tagged/Untagged) in the
    // InterpreterFrameState, so that we don't need to dereference a node.
    ValueNode* value = current_interpreter_frame_.get(reg);
    if (value->is_untagged_value()) return value;
    if (value->Is<CheckedSmiTag>()) return value->input(0).node();
    // Untag any other value.
    ValueNode* untagged = AddNewNode<CheckedSmiUntag>({value});
    current_interpreter_frame_.set(reg, untagged);
    return untagged;
  }

  ValueNode* GetAccumulatorTaggedValue() {
    return GetTaggedValue(interpreter::Register::virtual_accumulator());
  }

  ValueNode* GetAccumulatorSmiUntaggedValue() {
    return GetSmiUntaggedValue(interpreter::Register::virtual_accumulator());
  }

  bool IsRegisterEqualToAccumulator(int operand_index) {
    interpreter::Register source = iterator_.GetRegisterOperand(operand_index);
    return current_interpreter_frame_.get(source) ==
           current_interpreter_frame_.accumulator();
  }

  ValueNode* LoadRegisterTaggedValue(int operand_index) {
    return GetTaggedValue(iterator_.GetRegisterOperand(operand_index));
  }

  ValueNode* LoadRegisterSmiUntaggedValue(int operand_index) {
    return GetSmiUntaggedValue(iterator_.GetRegisterOperand(operand_index));
  }

  template <typename NodeT>
  void SetAccumulator(NodeT* node) {
    // Accumulator stores are equivalent to stores to the virtual accumulator
    // register.
    StoreRegister(interpreter::Register::virtual_accumulator(), node);
  }

  template <typename NodeT>
  void StoreRegister(interpreter::Register target, NodeT* value) {
    // We should only set register values to nodes that were newly created in
    // this Visit. Existing nodes should be moved between registers with
    // MoveNodeBetweenRegisters.
    DCHECK_NE(0, new_nodes_.count(value));
    MarkAsLazyDeoptResult(value, target);
    current_interpreter_frame_.set(target, value);
  }

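  // Returns the interpreter state to deopt to for an eager deopt at the
  // current offset. The state is built lazily and cached until a possible
  // side effect invalidates it (see MarkPossibleSideEffect).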
  CheckpointedInterpreterState GetLatestCheckpointedState() {
    if (!latest_checkpointed_state_) {
      latest_checkpointed_state_.emplace(
          BytecodeOffset(iterator_.current_offset()),
          zone()->New<CompactInterpreterFrameState>(
              *compilation_unit_, GetInLiveness(), current_interpreter_frame_));
    }
    return *latest_checkpointed_state_;
  }

  CheckpointedInterpreterState GetCheckpointedStateForLazyDeopt() {
    return CheckpointedInterpreterState(
        BytecodeOffset(iterator_.current_offset()),
        zone()->New<CompactInterpreterFrameState>(
            *compilation_unit_, GetOutLiveness(), current_interpreter_frame_));
  }

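  // If `value` can lazy-deopt, records `result_location` as the interpreter
  // register that the node's result should be written back to when the lazy
  // deopt is taken.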
  template <typename NodeT>
  void MarkAsLazyDeoptResult(NodeT* value,
                             interpreter::Register result_location) {
    DCHECK_EQ(NodeT::kProperties.can_lazy_deopt(),
              value->properties().can_lazy_deopt());
    if constexpr (NodeT::kProperties.can_lazy_deopt()) {
      DCHECK(result_location.is_valid());
      DCHECK(!value->lazy_deopt_info()->result_location.is_valid());
      value->lazy_deopt_info()->result_location = result_location;
    }
  }

  void MarkPossibleSideEffect() {
    // If there was a potential side effect, invalidate the previous checkpoint.
    latest_checkpointed_state_.reset();
  }

  int next_offset() const {
    return iterator_.current_offset() + iterator_.current_bytecode_size();
  }
  const compiler::BytecodeLivenessState* GetInLiveness() const {
    return bytecode_analysis().GetInLivenessFor(iterator_.current_offset());
  }
  const compiler::BytecodeLivenessState* GetOutLiveness() const {
    return bytecode_analysis().GetOutLivenessFor(iterator_.current_offset());
  }

  void StartNewBlock(int offset) {
    DCHECK_NULL(current_block_);
    current_block_ = zone()->New<BasicBlock>(merge_states_[offset]);
    block_offset_ = offset;
  }

  template <typename ControlNodeT, typename... Args>
  BasicBlock* CreateBlock(std::initializer_list<ValueNode*> control_inputs,
                          Args&&... args) {
    current_block_->set_control_node(CreateNewNode<ControlNodeT>(
        control_inputs, std::forward<Args>(args)...));

    BasicBlock* block = current_block_;
    current_block_ = nullptr;

    graph()->Add(block);
    if (has_graph_labeller()) {
      graph_labeller()->RegisterBasicBlock(block);
    }
    return block;
  }

  // Update all jumps which were targeting the not-yet-created block at the
  // given `block_offset`, to now point to the given `block`.
  void ResolveJumpsToBlockAtOffset(BasicBlock* block, int block_offset) const {
    BasicBlockRef* jump_target_refs_head =
        jump_targets_[block_offset].SetToBlockAndReturnNext(block);
    while (jump_target_refs_head != nullptr) {
      jump_target_refs_head =
          jump_target_refs_head->SetToBlockAndReturnNext(block);
    }
    DCHECK_EQ(jump_targets_[block_offset].block_ptr(), block);
  }

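  // Finishes the current block with `ControlNodeT`, resolves all jumps that
  // target this block's offset, and then either starts the fallthrough block
  // at `next_block_offset` or merges the current state into it.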
  template <typename ControlNodeT, typename... Args>
  BasicBlock* FinishBlock(int next_block_offset,
                          std::initializer_list<ValueNode*> control_inputs,
                          Args&&... args) {
    BasicBlock* block =
        CreateBlock<ControlNodeT>(control_inputs, std::forward<Args>(args)...);
    ResolveJumpsToBlockAtOffset(block, block_offset_);

    // If the next block has merge states, then it's not a simple fallthrough,
    // and we should reset the checkpoint validity.
    if (merge_states_[next_block_offset] != nullptr) {
      latest_checkpointed_state_.reset();
    }
    // Start a new block for the fallthrough path, unless it's a merge point, in
    // which case we merge our state into it. That merge-point could also be a
    // loop header, in which case the merge state might not exist yet (if the
    // only predecessors are this path and the JumpLoop).
    DCHECK_NULL(current_block_);
    if (std::is_base_of<ConditionalControlNode, ControlNodeT>::value) {
      if (NumPredecessors(next_block_offset) == 1) {
        StartNewBlock(next_block_offset);
      } else {
        MergeIntoFrameState(block, next_block_offset);
      }
    }
    return block;
  }

  void BuildCallFromRegisterList(ConvertReceiverMode receiver_mode);
  void BuildCallFromRegisters(int argc_count,
                              ConvertReceiverMode receiver_mode);

  void BuildPropertyCellAccess(const compiler::PropertyCellRef& property_cell);

  template <Operation kOperation>
  void BuildGenericUnaryOperationNode();
  template <Operation kOperation>
  void BuildGenericBinaryOperationNode();
  template <Operation kOperation>
  void BuildGenericBinarySmiOperationNode();

  template <Operation kOperation>
  void VisitUnaryOperation();
  template <Operation kOperation>
  void VisitBinaryOperation();
  template <Operation kOperation>
  void VisitBinarySmiOperation();

  void MergeIntoFrameState(BasicBlock* block, int target);
  void BuildBranchIfTrue(ValueNode* node, int true_target, int false_target);
  void BuildBranchIfToBooleanTrue(ValueNode* node, int true_target,
                                  int false_target);

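  // Pre-pass over the bytecode that counts, for every offset, how many
  // predecessors the basic block starting there will have (the fallthrough
  // plus any incoming jumps or switch targets).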
  void CalculatePredecessorCounts() {
    // Add 1 after the end of the bytecode so we can always write to the offset
    // after the last bytecode.
    size_t array_length = bytecode().length() + 1;
    predecessors_ = zone()->NewArray<uint32_t>(array_length);
    MemsetUint32(predecessors_, 1, array_length);

    interpreter::BytecodeArrayIterator iterator(bytecode().object());
    for (; !iterator.done(); iterator.Advance()) {
      interpreter::Bytecode bytecode = iterator.current_bytecode();
      if (interpreter::Bytecodes::IsJump(bytecode)) {
        predecessors_[iterator.GetJumpTargetOffset()]++;
        if (!interpreter::Bytecodes::IsConditionalJump(bytecode)) {
          predecessors_[iterator.next_offset()]--;
        }
      } else if (interpreter::Bytecodes::IsSwitch(bytecode)) {
        for (auto offset : iterator.GetJumpTableTargetOffsets()) {
          predecessors_[offset.target_offset]++;
        }
      } else if (interpreter::Bytecodes::Returns(bytecode) ||
                 interpreter::Bytecodes::UnconditionallyThrows(bytecode)) {
        predecessors_[iterator.next_offset()]--;
      }
      // TODO(leszeks): Also consider handler entries (the bytecode analysis
      // will do this automatically if we merge this into that).
    }
    DCHECK_EQ(0, predecessors_[bytecode().length()]);
  }

  int NumPredecessors(int offset) { return predecessors_[offset]; }

  compiler::JSHeapBroker* broker() const { return compilation_unit_->broker(); }
  const compiler::FeedbackVectorRef& feedback() const {
    return compilation_unit_->feedback();
  }
  const FeedbackNexus FeedbackNexusForOperand(int slot_operand_index) const {
    return FeedbackNexus(feedback().object(),
                         GetSlotOperand(slot_operand_index),
                         broker()->feedback_nexus_config());
  }
  const FeedbackNexus FeedbackNexusForSlot(FeedbackSlot slot) const {
    return FeedbackNexus(feedback().object(), slot,
                         broker()->feedback_nexus_config());
  }
  const compiler::BytecodeArrayRef& bytecode() const {
    return compilation_unit_->bytecode();
  }
  const compiler::BytecodeAnalysis& bytecode_analysis() const {
    return compilation_unit_->bytecode_analysis();
  }
  LocalIsolate* local_isolate() const { return local_isolate_; }
  Zone* zone() const { return compilation_unit_->zone(); }
  int parameter_count() const { return compilation_unit_->parameter_count(); }
  int register_count() const { return compilation_unit_->register_count(); }
  bool has_graph_labeller() const {
    return compilation_unit_->has_graph_labeller();
  }
  MaglevGraphLabeller* graph_labeller() const {
    return compilation_unit_->graph_labeller();
  }

  LocalIsolate* const local_isolate_;
  MaglevCompilationUnit* const compilation_unit_;
  interpreter::BytecodeArrayIterator iterator_;
  uint32_t* predecessors_;

  // Current block information.
  BasicBlock* current_block_ = nullptr;
  int block_offset_ = 0;
  base::Optional<CheckpointedInterpreterState> latest_checkpointed_state_;

  BasicBlockRef* jump_targets_;
  MergePointInterpreterFrameState** merge_states_;

  Graph* const graph_;
  InterpreterFrameState current_interpreter_frame_;

  // Allow marking some bytecodes as unsupported during graph building, so that
  // we can test maglev incrementally.
  // TODO(v8:7700): Clean up after all bytecodes are supported.
  bool found_unsupported_bytecode_ = false;
  bool this_field_will_be_unused_once_all_bytecodes_are_supported_;

#ifdef DEBUG
  std::unordered_set<Node*> new_nodes_;
#endif
};

}  // namespace maglev
}  // namespace internal
}  // namespace v8

#endif  // V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_