1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/scheduler.h"
6 
7 #include <iomanip>
8 
9 #include "src/base/adapters.h"
10 #include "src/bit-vector.h"
11 #include "src/compiler/common-operator.h"
12 #include "src/compiler/control-equivalence.h"
13 #include "src/compiler/graph.h"
14 #include "src/compiler/node-marker.h"
15 #include "src/compiler/node-properties.h"
16 #include "src/compiler/node.h"
17 #include "src/zone/zone-containers.h"
18 
19 namespace v8 {
20 namespace internal {
21 namespace compiler {
22 
23 #define TRACE(...)                                       \
24   do {                                                   \
25     if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
26   } while (false)
27 
28 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags)
29     : zone_(zone),
30       graph_(graph),
31       schedule_(schedule),
32       flags_(flags),
33       scheduled_nodes_(zone),
34       schedule_root_nodes_(zone),
35       schedule_queue_(zone),
36       node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {}
37 
38 
39 Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
40   Schedule* schedule = new (graph->zone())
41       Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
42   Scheduler scheduler(zone, graph, schedule, flags);
43 
44   scheduler.BuildCFG();
45   scheduler.ComputeSpecialRPONumbering();
46   scheduler.GenerateImmediateDominatorTree();
47 
48   scheduler.PrepareUses();
49   scheduler.ScheduleEarly();
50   scheduler.ScheduleLate();
51 
52   scheduler.SealFinalSchedule();
53 
54   return schedule;
55 }
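// Illustrative usage sketch (not part of this file's logic): a client owning
// a {graph} and a scratch {temp_zone} would obtain a schedule via the static
// entry point above. The flag value is an assumption taken from scheduler.h
// and is shown for the example only.
//
//   Schedule* schedule =
//       Scheduler::ComputeSchedule(temp_zone, graph, Scheduler::kNoFlags);
//   // Every node of {graph} now belongs to a basic block of {schedule}.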
56 
57 
58 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
59   SchedulerData def = {schedule_->start(), 0, kUnknown};
60   return def;
61 }
62 
63 
64 Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
65   return &node_data_[node->id()];
66 }
67 
68 
69 Scheduler::Placement Scheduler::GetPlacement(Node* node) {
70   SchedulerData* data = GetData(node);
71   if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
72     switch (node->opcode()) {
73       case IrOpcode::kParameter:
74       case IrOpcode::kOsrValue:
75         // Parameters and OSR values are always fixed to the start block.
76         data->placement_ = kFixed;
77         break;
78       case IrOpcode::kPhi:
79       case IrOpcode::kEffectPhi: {
80         // Phis and effect phis are fixed if their control inputs are;
81         // otherwise they are coupled to a floating control node.
82         Placement p = GetPlacement(NodeProperties::GetControlInput(node));
83         data->placement_ = (p == kFixed ? kFixed : kCoupled);
84         break;
85       }
86 #define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
87       CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
88 #undef DEFINE_CONTROL_CASE
89       {
90         // Control nodes that were not control-reachable from end may float.
91         data->placement_ = kSchedulable;
92         break;
93       }
94       default:
95         data->placement_ = kSchedulable;
96         break;
97     }
98   }
99   return data->placement_;
100 }
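// For reference, the placements roughly form a small state machine through
// which GetPlacement() and UpdatePlacement() move each node (see the switch
// statements above and below):
//
//   kUnknown --> kFixed                       (control, parameters, OSR values)
//   kUnknown --> kCoupled --> kFixed          (phis on floating control)
//   kUnknown --> kSchedulable --> kScheduled  (all other nodes)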
101 
102 
103 void Scheduler::UpdatePlacement(Node* node, Placement placement) {
104   SchedulerData* data = GetData(node);
105   if (data->placement_ != kUnknown) {  // Trap on mutation, not initialization.
106     switch (node->opcode()) {
107       case IrOpcode::kParameter:
108         // Parameters are fixed once and for all.
109         UNREACHABLE();
110         break;
111       case IrOpcode::kPhi:
112       case IrOpcode::kEffectPhi: {
113         // Phis and effect phis are coupled to their respective blocks.
114         DCHECK_EQ(Scheduler::kCoupled, data->placement_);
115         DCHECK_EQ(Scheduler::kFixed, placement);
116         Node* control = NodeProperties::GetControlInput(node);
117         BasicBlock* block = schedule_->block(control);
118         schedule_->AddNode(block, node);
119         break;
120       }
121 #define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
122       CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
123 #undef DEFINE_CONTROL_CASE
124       {
125         // Control nodes force coupled uses to be placed.
126         for (auto use : node->uses()) {
127           if (GetPlacement(use) == Scheduler::kCoupled) {
128             DCHECK_EQ(node, NodeProperties::GetControlInput(use));
129             UpdatePlacement(use, placement);
130           }
131         }
132         break;
133       }
134       default:
135         DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
136         DCHECK_EQ(Scheduler::kScheduled, placement);
137         break;
138     }
139     // Reduce the use count of the node's inputs to potentially make them
140     // schedulable. If all the uses of a node have been scheduled, then the node
141     // itself can be scheduled.
142     for (Edge const edge : node->input_edges()) {
143       DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
144     }
145   }
146   data->placement_ = placement;
147 }
148 
149 
150 bool Scheduler::IsCoupledControlEdge(Node* node, int index) {
151   return GetPlacement(node) == kCoupled &&
152          NodeProperties::FirstControlIndex(node) == index;
153 }
154 
155 
156 void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
157                                              Node* from) {
158   // Make sure that control edges from coupled nodes are not counted.
159   if (IsCoupledControlEdge(from, index)) return;
160 
161   // Tracking use counts for fixed nodes is useless.
162   if (GetPlacement(node) == kFixed) return;
163 
164   // Use count for coupled nodes is summed up on their control.
165   if (GetPlacement(node) == kCoupled) {
166     Node* control = NodeProperties::GetControlInput(node);
167     return IncrementUnscheduledUseCount(control, index, from);
168   }
169 
170   ++(GetData(node)->unscheduled_count_);
171   if (FLAG_trace_turbo_scheduler) {
172     TRACE("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
173           node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
174           GetData(node)->unscheduled_count_);
175   }
176 }
177 
178 
179 void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
180                                              Node* from) {
181   // Make sure that control edges from coupled nodes are not counted.
182   if (IsCoupledControlEdge(from, index)) return;
183 
184   // Tracking use counts for fixed nodes is useless.
185   if (GetPlacement(node) == kFixed) return;
186 
187   // Use count for coupled nodes is summed up on their control.
188   if (GetPlacement(node) == kCoupled) {
189     Node* control = NodeProperties::GetControlInput(node);
190     return DecrementUnscheduledUseCount(control, index, from);
191   }
192 
193   DCHECK(GetData(node)->unscheduled_count_ > 0);
194   --(GetData(node)->unscheduled_count_);
195   if (FLAG_trace_turbo_scheduler) {
196     TRACE("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
197           node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
198           GetData(node)->unscheduled_count_);
199   }
200   if (GetData(node)->unscheduled_count_ == 0) {
201     TRACE("    newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
202     schedule_queue_.push(node);
203   }
204 }
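// In effect the unscheduled use count is a reference count of not-yet-placed
// uses: once it drops to zero, every use of the node has been given a block,
// so the node itself becomes eligible for schedule-late placement and is
// pushed onto {schedule_queue_} above.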
205 
206 
207 // -----------------------------------------------------------------------------
208 // Phase 1: Build control-flow graph.
209 
210 
211 // Internal class to build a control flow graph (i.e. the basic blocks and edges
212 // between them within a Schedule) from the node graph. Visits control edges of
213 // the graph backwards from an end node in order to find the connected control
214 // subgraph, needed for scheduling.
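// Example (illustrative): for a diamond
//
//   Branch --> IfTrue  ---v
//     |                 Merge
//     +-----> IfFalse ---^
//
// the builder creates basic blocks for IfTrue, IfFalse and Merge; the Branch
// terminates the block of its control input, and the IfTrue/IfFalse blocks
// are connected to the Merge's block with gotos.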
215 class CFGBuilder : public ZoneObject {
216  public:
217   CFGBuilder(Zone* zone, Scheduler* scheduler)
218       : zone_(zone),
219         scheduler_(scheduler),
220         schedule_(scheduler->schedule_),
221         queued_(scheduler->graph_, 2),
222         queue_(zone),
223         control_(zone),
224         component_entry_(nullptr),
225         component_start_(nullptr),
226         component_end_(nullptr) {}
227 
228   // Run the control flow graph construction algorithm by walking the graph
229   // backwards from end through control edges, building and connecting the
230   // basic blocks for control nodes.
231   void Run() {
232     ResetDataStructures();
233     Queue(scheduler_->graph_->end());
234 
235     while (!queue_.empty()) {  // Breadth-first backwards traversal.
236       Node* node = queue_.front();
237       queue_.pop();
238       int max = NodeProperties::PastControlIndex(node);
239       for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
240         Queue(node->InputAt(i));
241       }
242     }
243 
244     for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
245       ConnectBlocks(*i);  // Connect block to its predecessor/successors.
246     }
247   }
248 
249   // Run the control flow graph construction for a minimal control-connected
250   // component ending in {exit} and merge that component into an existing
251   // control flow graph at the bottom of {block}.
252   void Run(BasicBlock* block, Node* exit) {
253     ResetDataStructures();
254     Queue(exit);
255 
256     component_entry_ = nullptr;
257     component_start_ = block;
258     component_end_ = schedule_->block(exit);
259     scheduler_->equivalence_->Run(exit);
260     while (!queue_.empty()) {  // Breadth-first backwards traversal.
261       Node* node = queue_.front();
262       queue_.pop();
263 
264       // Use control dependence equivalence to find a canonical single-entry
265       // single-exit region that makes up a minimal component to be scheduled.
266       if (IsSingleEntrySingleExitRegion(node, exit)) {
267         TRACE("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
268         DCHECK(!component_entry_);
269         component_entry_ = node;
270         continue;
271       }
272 
273       int max = NodeProperties::PastControlIndex(node);
274       for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
275         Queue(node->InputAt(i));
276       }
277     }
278     DCHECK(component_entry_);
279 
280     for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
281       ConnectBlocks(*i);  // Connect block to its predecessor/successors.
282     }
283   }
284 
285  private:
286   friend class ScheduleLateNodeVisitor;
287   friend class Scheduler;
288 
289   void FixNode(BasicBlock* block, Node* node) {
290     schedule_->AddNode(block, node);
291     scheduler_->UpdatePlacement(node, Scheduler::kFixed);
292   }
293 
294   void Queue(Node* node) {
295     // Mark the connected control nodes as they are queued.
296     if (!queued_.Get(node)) {
297       BuildBlocks(node);
298       queue_.push(node);
299       queued_.Set(node, true);
300       control_.push_back(node);
301     }
302   }
303 
304   void BuildBlocks(Node* node) {
305     switch (node->opcode()) {
306       case IrOpcode::kEnd:
307         FixNode(schedule_->end(), node);
308         break;
309       case IrOpcode::kStart:
310         FixNode(schedule_->start(), node);
311         break;
312       case IrOpcode::kLoop:
313       case IrOpcode::kMerge:
314         BuildBlockForNode(node);
315         break;
316       case IrOpcode::kTerminate: {
317         // Put Terminate in the loop to which it refers.
318         Node* loop = NodeProperties::GetControlInput(node);
319         BasicBlock* block = BuildBlockForNode(loop);
320         FixNode(block, node);
321         break;
322       }
323       case IrOpcode::kBranch:
324       case IrOpcode::kSwitch:
325         BuildBlocksForSuccessors(node);
326         break;
327 #define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
328         JS_OP_LIST(BUILD_BLOCK_JS_CASE)
329 // JS opcodes are just like calls => fall through.
330 #undef BUILD_BLOCK_JS_CASE
331       case IrOpcode::kCall:
332         if (NodeProperties::IsExceptionalCall(node)) {
333           BuildBlocksForSuccessors(node);
334         }
335         break;
336       default:
337         break;
338     }
339   }
340 
341   void ConnectBlocks(Node* node) {
342     switch (node->opcode()) {
343       case IrOpcode::kLoop:
344       case IrOpcode::kMerge:
345         ConnectMerge(node);
346         break;
347       case IrOpcode::kBranch:
348         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
349         ConnectBranch(node);
350         break;
351       case IrOpcode::kSwitch:
352         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
353         ConnectSwitch(node);
354         break;
355       case IrOpcode::kDeoptimize:
356         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
357         ConnectDeoptimize(node);
358         break;
359       case IrOpcode::kTailCall:
360         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
361         ConnectTailCall(node);
362         break;
363       case IrOpcode::kReturn:
364         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
365         ConnectReturn(node);
366         break;
367       case IrOpcode::kThrow:
368         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
369         ConnectThrow(node);
370         break;
371 #define CONNECT_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
372         JS_OP_LIST(CONNECT_BLOCK_JS_CASE)
373 // JS opcodes are just like calls => fall through.
374 #undef CONNECT_BLOCK_JS_CASE
375       case IrOpcode::kCall:
376         if (NodeProperties::IsExceptionalCall(node)) {
377           scheduler_->UpdatePlacement(node, Scheduler::kFixed);
378           ConnectCall(node);
379         }
380         break;
381       default:
382         break;
383     }
384   }
385 
386   BasicBlock* BuildBlockForNode(Node* node) {
387     BasicBlock* block = schedule_->block(node);
388     if (block == nullptr) {
389       block = schedule_->NewBasicBlock();
390       TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
391             node->op()->mnemonic());
392       FixNode(block, node);
393     }
394     return block;
395   }
396 
397   void BuildBlocksForSuccessors(Node* node) {
398     size_t const successor_cnt = node->op()->ControlOutputCount();
399     Node** successors = zone_->NewArray<Node*>(successor_cnt);
400     NodeProperties::CollectControlProjections(node, successors, successor_cnt);
401     for (size_t index = 0; index < successor_cnt; ++index) {
402       BuildBlockForNode(successors[index]);
403     }
404   }
405 
406   void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
407                               size_t successor_cnt) {
408     Node** successors = reinterpret_cast<Node**>(successor_blocks);
409     NodeProperties::CollectControlProjections(node, successors, successor_cnt);
410     for (size_t index = 0; index < successor_cnt; ++index) {
411       successor_blocks[index] = schedule_->block(successors[index]);
412     }
413   }
414 
415   BasicBlock* FindPredecessorBlock(Node* node) {
416     BasicBlock* predecessor_block = nullptr;
417     while (true) {
418       predecessor_block = schedule_->block(node);
419       if (predecessor_block != nullptr) break;
420       node = NodeProperties::GetControlInput(node);
421     }
422     return predecessor_block;
423   }
424 
425   void ConnectCall(Node* call) {
426     BasicBlock* successor_blocks[2];
427     CollectSuccessorBlocks(call, successor_blocks, arraysize(successor_blocks));
428 
429     // Consider the exception continuation to be deferred.
430     successor_blocks[1]->set_deferred(true);
431 
432     Node* call_control = NodeProperties::GetControlInput(call);
433     BasicBlock* call_block = FindPredecessorBlock(call_control);
434     TraceConnect(call, call_block, successor_blocks[0]);
435     TraceConnect(call, call_block, successor_blocks[1]);
436     schedule_->AddCall(call_block, call, successor_blocks[0],
437                        successor_blocks[1]);
438   }
439 
440   void ConnectBranch(Node* branch) {
441     BasicBlock* successor_blocks[2];
442     CollectSuccessorBlocks(branch, successor_blocks,
443                            arraysize(successor_blocks));
444 
445     // Consider branch hints.
446     switch (BranchHintOf(branch->op())) {
447       case BranchHint::kNone:
448         break;
449       case BranchHint::kTrue:
450         successor_blocks[1]->set_deferred(true);
451         break;
452       case BranchHint::kFalse:
453         successor_blocks[0]->set_deferred(true);
454         break;
455     }
456 
457     if (branch == component_entry_) {
458       TraceConnect(branch, component_start_, successor_blocks[0]);
459       TraceConnect(branch, component_start_, successor_blocks[1]);
460       schedule_->InsertBranch(component_start_, component_end_, branch,
461                               successor_blocks[0], successor_blocks[1]);
462     } else {
463       Node* branch_control = NodeProperties::GetControlInput(branch);
464       BasicBlock* branch_block = FindPredecessorBlock(branch_control);
465       TraceConnect(branch, branch_block, successor_blocks[0]);
466       TraceConnect(branch, branch_block, successor_blocks[1]);
467       schedule_->AddBranch(branch_block, branch, successor_blocks[0],
468                            successor_blocks[1]);
469     }
470   }
471 
472   void ConnectSwitch(Node* sw) {
473     size_t const successor_count = sw->op()->ControlOutputCount();
474     BasicBlock** successor_blocks =
475         zone_->NewArray<BasicBlock*>(successor_count);
476     CollectSuccessorBlocks(sw, successor_blocks, successor_count);
477 
478     if (sw == component_entry_) {
479       for (size_t index = 0; index < successor_count; ++index) {
480         TraceConnect(sw, component_start_, successor_blocks[index]);
481       }
482       schedule_->InsertSwitch(component_start_, component_end_, sw,
483                               successor_blocks, successor_count);
484     } else {
485       Node* switch_control = NodeProperties::GetControlInput(sw);
486       BasicBlock* switch_block = FindPredecessorBlock(switch_control);
487       for (size_t index = 0; index < successor_count; ++index) {
488         TraceConnect(sw, switch_block, successor_blocks[index]);
489       }
490       schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);
491     }
492   }
493 
494   void ConnectMerge(Node* merge) {
495     // Don't connect the special merge at the end to its predecessors.
496     if (IsFinalMerge(merge)) return;
497 
498     BasicBlock* block = schedule_->block(merge);
499     DCHECK_NOT_NULL(block);
500     // For each of the merge's control inputs, add a goto from the end of the
501     // predecessor block to the merge's basic block.
502     for (Node* const input : merge->inputs()) {
503       BasicBlock* predecessor_block = FindPredecessorBlock(input);
504       TraceConnect(merge, predecessor_block, block);
505       schedule_->AddGoto(predecessor_block, block);
506     }
507   }
508 
509   void ConnectTailCall(Node* call) {
510     Node* call_control = NodeProperties::GetControlInput(call);
511     BasicBlock* call_block = FindPredecessorBlock(call_control);
512     TraceConnect(call, call_block, nullptr);
513     schedule_->AddTailCall(call_block, call);
514   }
515 
516   void ConnectReturn(Node* ret) {
517     Node* return_control = NodeProperties::GetControlInput(ret);
518     BasicBlock* return_block = FindPredecessorBlock(return_control);
519     TraceConnect(ret, return_block, nullptr);
520     schedule_->AddReturn(return_block, ret);
521   }
522 
523   void ConnectDeoptimize(Node* deopt) {
524     Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
525     BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
526     TraceConnect(deopt, deoptimize_block, nullptr);
527     schedule_->AddDeoptimize(deoptimize_block, deopt);
528   }
529 
530   void ConnectThrow(Node* thr) {
531     Node* throw_control = NodeProperties::GetControlInput(thr);
532     BasicBlock* throw_block = FindPredecessorBlock(throw_control);
533     TraceConnect(thr, throw_block, nullptr);
534     schedule_->AddThrow(throw_block, thr);
535   }
536 
537   void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
538     DCHECK_NOT_NULL(block);
539     if (succ == nullptr) {
540       TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
541             node->op()->mnemonic(), block->id().ToInt());
542     } else {
543       TRACE("Connect #%d:%s, id:%d -> id:%d\n", node->id(),
544             node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt());
545     }
546   }
547 
548   bool IsFinalMerge(Node* node) {
549     return (node->opcode() == IrOpcode::kMerge &&
550             node == scheduler_->graph_->end()->InputAt(0));
551   }
552 
553   bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
554     size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
555     size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
556     return entry != exit && entry_class == exit_class;
557   }
558 
559   void ResetDataStructures() {
560     control_.clear();
561     DCHECK(queue_.empty());
562     DCHECK(control_.empty());
563   }
564 
565   Zone* zone_;
566   Scheduler* scheduler_;
567   Schedule* schedule_;
568   NodeMarker<bool> queued_;      // Mark indicating whether node is queued.
569   ZoneQueue<Node*> queue_;       // Queue used for breadth-first traversal.
570   NodeVector control_;           // List of encountered control nodes.
571   Node* component_entry_;        // Component single-entry node.
572   BasicBlock* component_start_;  // Component single-entry block.
573   BasicBlock* component_end_;    // Component single-exit block.
574 };
575 
576 
577 void Scheduler::BuildCFG() {
578   TRACE("--- CREATING CFG -------------------------------------------\n");
579 
580   // Instantiate a new control equivalence algorithm for the graph.
581   equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);
582 
583   // Build a control-flow graph for the main control-connected component that
584   // is being spanned by the graph's start and end nodes.
585   control_flow_builder_ = new (zone_) CFGBuilder(zone_, this);
586   control_flow_builder_->Run();
587 
588   // Initialize per-block data.
589   scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
590 }
591 
592 
593 // -----------------------------------------------------------------------------
594 // Phase 2: Compute special RPO and dominator tree.
595 
596 
597 // Compute the special reverse-post-order block ordering, which is essentially
598 // an RPO of the graph where loop bodies are contiguous. Properties:
599 // 1. If block A is a predecessor of B, then A appears before B in the order,
600 //    unless B is a loop header and A is in the loop headed at B
601 //    (i.e. A -> B is a backedge).
602 // => If block A dominates block B, then A appears before B in the order.
603 // => If block A is a loop header, A appears before all blocks in the loop
604 //    headed at A.
605 // 2. All loops are contiguous in the order (i.e. no intervening blocks that
606 //    do not belong to the loop.)
607 // Note a simple RPO traversal satisfies (1) but not (2).
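// Example (illustrative): for the loop CFG
//
//   B0 --> B1 --> B2 --> B4
//          ^      |
//          |      v
//          +----- B3
//
// where B1 is the loop header and B3 -> B1 the backedge, a plain RPO may
// yield [B0, B1, B2, B4, B3], splitting the loop {B1, B2, B3} around B4,
// whereas the special RPO always yields [B0, B1, B2, B3, B4].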
608 class SpecialRPONumberer : public ZoneObject {
609  public:
610   SpecialRPONumberer(Zone* zone, Schedule* schedule)
611       : zone_(zone),
612         schedule_(schedule),
613         order_(nullptr),
614         beyond_end_(nullptr),
615         loops_(zone),
616         backedges_(zone),
617         stack_(zone),
618         previous_block_count_(0),
619         empty_(0, zone) {}
620 
621   // Computes the special reverse-post-order for the main control flow graph,
622   // that is for the graph spanned between the schedule's start and end blocks.
623   void ComputeSpecialRPO() {
624     DCHECK(schedule_->end()->SuccessorCount() == 0);
625     DCHECK(!order_);  // Main order does not exist yet.
626     ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
627   }
628 
629   // Computes the special reverse-post-order for a partial control flow graph,
630   // that is for the graph spanned between the given {entry} and {end} blocks,
631   // then updates the existing ordering with this new information.
632   void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
633     DCHECK(order_);  // Main order to be updated is present.
634     ComputeAndInsertSpecialRPO(entry, end);
635   }
636 
637   // Serialize the previously computed order as a special reverse-post-order
638   // numbering for basic blocks into the final schedule.
639   void SerializeRPOIntoSchedule() {
640     int32_t number = 0;
641     for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
642       b->set_rpo_number(number++);
643       schedule_->rpo_order()->push_back(b);
644     }
645     BeyondEndSentinel()->set_rpo_number(number);
646   }
647 
648   // Print and verify the special reverse-post-order.
649   void PrintAndVerifySpecialRPO() {
650 #if DEBUG
651     if (FLAG_trace_turbo_scheduler) PrintRPO();
652     VerifySpecialRPO();
653 #endif
654   }
655 
656   const ZoneVector<BasicBlock*>& GetOutgoingBlocks(BasicBlock* block) {
657     if (HasLoopNumber(block)) {
658       LoopInfo const& loop = loops_[GetLoopNumber(block)];
659       if (loop.outgoing) return *loop.outgoing;
660     }
661     return empty_;
662   }
663 
664  private:
665   typedef std::pair<BasicBlock*, size_t> Backedge;
666 
667   // Numbering for BasicBlock::rpo_number for this block traversal:
668   static const int kBlockOnStack = -2;
669   static const int kBlockVisited1 = -3;
670   static const int kBlockVisited2 = -4;
671   static const int kBlockUnvisited1 = -1;
672   static const int kBlockUnvisited2 = kBlockVisited1;
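  // Note that kBlockUnvisited2 aliases kBlockVisited1: the blocks marked as
  // visited by the first traversal are exactly the ones the second traversal
  // treats as unvisited, so the marks need not be reset in between.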
673 
674   struct SpecialRPOStackFrame {
675     BasicBlock* block;
676     size_t index;
677   };
678 
679   struct LoopInfo {
680     BasicBlock* header;
681     ZoneVector<BasicBlock*>* outgoing;
682     BitVector* members;
683     LoopInfo* prev;
684     BasicBlock* end;
685     BasicBlock* start;
686 
687     void AddOutgoing(Zone* zone, BasicBlock* block) {
688       if (outgoing == nullptr) {
689         outgoing = new (zone->New(sizeof(ZoneVector<BasicBlock*>)))
690             ZoneVector<BasicBlock*>(zone);
691       }
692       outgoing->push_back(block);
693     }
694   };
695 
696   int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
697            BasicBlock* child, int unvisited) {
698     if (child->rpo_number() == unvisited) {
699       stack[depth].block = child;
700       stack[depth].index = 0;
701       child->set_rpo_number(kBlockOnStack);
702       return depth + 1;
703     }
704     return depth;
705   }
706 
707   BasicBlock* PushFront(BasicBlock* head, BasicBlock* block) {
708     block->set_rpo_next(head);
709     return block;
710   }
711 
712   static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
713   static void SetLoopNumber(BasicBlock* block, int loop_number) {
714     return block->set_loop_number(loop_number);
715   }
716   static bool HasLoopNumber(BasicBlock* block) {
717     return block->loop_number() >= 0;
718   }
719 
720   // TODO(mstarzinger): We only need this special sentinel because some tests
721   // use the schedule's end block in actual control flow (e.g. with end having
722   // successors). Once this has been cleaned up we can use the end block here.
723   BasicBlock* BeyondEndSentinel() {
724     if (beyond_end_ == nullptr) {
725       BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
726       beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
727     }
728     return beyond_end_;
729   }
730 
731   // Compute special RPO for the control flow graph between {entry} and {end},
732   // mutating any existing order so that the result is still valid.
733   void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
734     // RPO should not have been serialized for this schedule yet.
735     CHECK_EQ(kBlockUnvisited1, schedule_->start()->loop_number());
736     CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
737     CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));
738 
739     // Find correct insertion point within existing order.
740     BasicBlock* insertion_point = entry->rpo_next();
741     BasicBlock* order = insertion_point;
742 
743     // Perform an iterative RPO traversal using an explicit stack,
744     // recording backedges that form cycles. O(|B|).
745     DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
746     stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
747     previous_block_count_ = schedule_->BasicBlockCount();
748     int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
749     int num_loops = static_cast<int>(loops_.size());
750 
751     while (stack_depth > 0) {
752       int current = stack_depth - 1;
753       SpecialRPOStackFrame* frame = &stack_[current];
754 
755       if (frame->block != end &&
756           frame->index < frame->block->SuccessorCount()) {
757         // Process the next successor.
758         BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
759         if (succ->rpo_number() == kBlockVisited1) continue;
760         if (succ->rpo_number() == kBlockOnStack) {
761           // The successor is on the stack, so this is a backedge (cycle).
762           backedges_.push_back(Backedge(frame->block, frame->index - 1));
763           if (!HasLoopNumber(succ)) {
764             // Assign a new loop number to the header if it doesn't have one.
765             SetLoopNumber(succ, num_loops++);
766           }
767         } else {
768           // Push the successor onto the stack.
769           DCHECK(succ->rpo_number() == kBlockUnvisited1);
770           stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
771         }
772       } else {
773         // Finished with all successors; pop the stack and add the block.
774         order = PushFront(order, frame->block);
775         frame->block->set_rpo_number(kBlockVisited1);
776         stack_depth--;
777       }
778     }
779 
780     // If no loops were encountered, then the order we computed was correct.
781     if (num_loops > static_cast<int>(loops_.size())) {
782       // Otherwise, compute the loop information from the backedges in order
783       // to perform a traversal that groups loop bodies together.
784       ComputeLoopInfo(stack_, num_loops, &backedges_);
785 
786       // Initialize the "loop stack". Note the entry could be a loop header.
787       LoopInfo* loop =
788           HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : nullptr;
789       order = insertion_point;
790 
791       // Perform an iterative post-order traversal, visiting loop bodies before
792       // edges that lead out of loops. Visits each block once, but linking loop
793       // sections together is linear in the loop size, so the overall complexity
794       // is O(|B| + max(loop_depth) * max(|loop|)).
795       stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
796       while (stack_depth > 0) {
797         SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
798         BasicBlock* block = frame->block;
799         BasicBlock* succ = nullptr;
800 
801         if (block != end && frame->index < block->SuccessorCount()) {
802           // Process the next normal successor.
803           succ = block->SuccessorAt(frame->index++);
804         } else if (HasLoopNumber(block)) {
805           // Process additional outgoing edges from the loop header.
806           if (block->rpo_number() == kBlockOnStack) {
807             // Finish the loop body the first time the header is left on the
808             // stack.
809             DCHECK(loop != nullptr && loop->header == block);
810             loop->start = PushFront(order, block);
811             order = loop->end;
812             block->set_rpo_number(kBlockVisited2);
813             // Pop the loop stack and continue visiting outgoing edges within
814             // the context of the outer loop, if any.
815             loop = loop->prev;
816             // We leave the loop header on the stack; the rest of this iteration
817             // and later iterations will go through its outgoing edges list.
818           }
819 
820           // Use the next outgoing edge if there are any.
821           size_t outgoing_index = frame->index - block->SuccessorCount();
822           LoopInfo* info = &loops_[GetLoopNumber(block)];
823           DCHECK(loop != info);
824           if (block != entry && info->outgoing != nullptr &&
825               outgoing_index < info->outgoing->size()) {
826             succ = info->outgoing->at(outgoing_index);
827             frame->index++;
828           }
829         }
830 
831         if (succ != nullptr) {
832           // Process the next successor.
833           if (succ->rpo_number() == kBlockOnStack) continue;
834           if (succ->rpo_number() == kBlockVisited2) continue;
835           DCHECK(succ->rpo_number() == kBlockUnvisited2);
836           if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
837             // The successor is not in the current loop or any nested loop.
838             // Add it to the outgoing edges of this loop and visit it later.
839             loop->AddOutgoing(zone_, succ);
840           } else {
841             // Push the successor onto the stack.
842             stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
843             if (HasLoopNumber(succ)) {
844               // Push the inner loop onto the loop stack.
845               DCHECK(GetLoopNumber(succ) < num_loops);
846               LoopInfo* next = &loops_[GetLoopNumber(succ)];
847               next->end = order;
848               next->prev = loop;
849               loop = next;
850             }
851           }
852         } else {
853           // Finished with all successors of the current block.
854           if (HasLoopNumber(block)) {
855             // If we are going to pop a loop header, then add its entire body.
856             LoopInfo* info = &loops_[GetLoopNumber(block)];
857             for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
858               if (b->rpo_next() == info->end) {
859                 b->set_rpo_next(order);
860                 info->end = order;
861                 break;
862               }
863             }
864             order = info->start;
865           } else {
866             // Pop a single node off the stack and add it to the order.
867             order = PushFront(order, block);
868             block->set_rpo_number(kBlockVisited2);
869           }
870           stack_depth--;
871         }
872       }
873     }
874 
875     // Publish new order the first time.
876     if (order_ == nullptr) order_ = order;
877 
878     // Compute the correct loop headers and set the correct loop ends.
879     LoopInfo* current_loop = nullptr;
880     BasicBlock* current_header = entry->loop_header();
881     int32_t loop_depth = entry->loop_depth();
882     if (entry->IsLoopHeader()) --loop_depth;  // Entry might be a loop header.
883     for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
884       BasicBlock* current = b;
885 
886       // Reset BasicBlock::rpo_number again.
887       current->set_rpo_number(kBlockUnvisited1);
888 
889       // Finish the previous loop(s) if we just exited them.
890       while (current_header != nullptr &&
891              current == current_header->loop_end()) {
892         DCHECK(current_header->IsLoopHeader());
893         DCHECK_NOT_NULL(current_loop);
894         current_loop = current_loop->prev;
895         current_header =
896             current_loop == nullptr ? nullptr : current_loop->header;
897         --loop_depth;
898       }
899       current->set_loop_header(current_header);
900 
901       // Push a new loop onto the stack if this loop is a loop header.
902       if (HasLoopNumber(current)) {
903         ++loop_depth;
904         current_loop = &loops_[GetLoopNumber(current)];
905         BasicBlock* end = current_loop->end;
906         current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
907         current_header = current_loop->header;
908         TRACE("id:%d is a loop header, increment loop depth to %d\n",
909               current->id().ToInt(), loop_depth);
910       }
911 
912       current->set_loop_depth(loop_depth);
913 
914       if (current->loop_header() == nullptr) {
915         TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
916               current->loop_depth());
917       } else {
918         TRACE("id:%d has loop header id:%d, (depth == %d)\n",
919               current->id().ToInt(), current->loop_header()->id().ToInt(),
920               current->loop_depth());
921       }
922     }
923   }
924 
925   // Computes loop membership from the backedges of the control flow graph.
926   void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
927                        size_t num_loops, ZoneVector<Backedge>* backedges) {
928     // Extend existing loop membership vectors.
929     for (LoopInfo& loop : loops_) {
930       BitVector* new_members = new (zone_)
931           BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
932       new_members->CopyFrom(*loop.members);
933       loop.members = new_members;
934     }
935 
936     // Extend loop information vector.
937     loops_.resize(num_loops, LoopInfo());
938 
939     // Compute loop membership starting from backedges.
940     // O(max(loop_depth) * max(|loop|))
941     for (size_t i = 0; i < backedges->size(); i++) {
942       BasicBlock* member = backedges->at(i).first;
943       BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
944       size_t loop_num = GetLoopNumber(header);
945       if (loops_[loop_num].header == nullptr) {
946         loops_[loop_num].header = header;
947         loops_[loop_num].members = new (zone_)
948             BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
949       }
950 
951       int queue_length = 0;
952       if (member != header) {
953         // As long as the header doesn't have a backedge to itself,
954         // push the member onto the queue and process its predecessors.
955         if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
956           loops_[loop_num].members->Add(member->id().ToInt());
957         }
958         queue[queue_length++].block = member;
959       }
960 
961       // Propagate loop membership backwards. All predecessors of M up to the
962       // loop header H are members of the loop too. O(|blocks between M and H|).
963       while (queue_length > 0) {
964         BasicBlock* block = queue[--queue_length].block;
965         for (size_t i = 0; i < block->PredecessorCount(); i++) {
966           BasicBlock* pred = block->PredecessorAt(i);
967           if (pred != header) {
968             if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
969               loops_[loop_num].members->Add(pred->id().ToInt());
970               queue[queue_length++].block = pred;
971             }
972           }
973         }
974       }
975     }
976   }
977 
978 #if DEBUG
979   void PrintRPO() {
980     OFStream os(stdout);
981     os << "RPO with " << loops_.size() << " loops";
982     if (loops_.size() > 0) {
983       os << " (";
984       for (size_t i = 0; i < loops_.size(); i++) {
985         if (i > 0) os << " ";
986         os << "id:" << loops_[i].header->id();
987       }
988       os << ")";
989     }
990     os << ":\n";
991 
992     for (BasicBlock* block = order_; block != nullptr;
993          block = block->rpo_next()) {
994       os << std::setw(5) << "B" << block->rpo_number() << ":";
995       for (size_t i = 0; i < loops_.size(); i++) {
996         bool range = loops_[i].header->LoopContains(block);
997         bool membership = loops_[i].header != block && range;
998         os << (membership ? " |" : "  ");
999         os << (range ? "x" : " ");
1000       }
1001       os << "  id:" << block->id() << ": ";
1002       if (block->loop_end() != nullptr) {
1003         os << " range: [B" << block->rpo_number() << ", B"
1004            << block->loop_end()->rpo_number() << ")";
1005       }
1006       if (block->loop_header() != nullptr) {
1007         os << " header: id:" << block->loop_header()->id();
1008       }
1009       if (block->loop_depth() > 0) {
1010         os << " depth: " << block->loop_depth();
1011       }
1012       os << "\n";
1013     }
1014   }
1015 
1016   void VerifySpecialRPO() {
1017     BasicBlockVector* order = schedule_->rpo_order();
1018     DCHECK(order->size() > 0);
1019     DCHECK((*order)[0]->id().ToInt() == 0);  // entry should be first.
1020 
1021     for (size_t i = 0; i < loops_.size(); i++) {
1022       LoopInfo* loop = &loops_[i];
1023       BasicBlock* header = loop->header;
1024       BasicBlock* end = header->loop_end();
1025 
1026       DCHECK_NOT_NULL(header);
1027       DCHECK(header->rpo_number() >= 0);
1028       DCHECK(header->rpo_number() < static_cast<int>(order->size()));
1029       DCHECK_NOT_NULL(end);
1030       DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
1031       DCHECK(end->rpo_number() > header->rpo_number());
1032       DCHECK(header->loop_header() != header);
1033 
1034       // Verify the start ... end list relationship.
1035       int links = 0;
1036       BasicBlock* block = loop->start;
1037       DCHECK_EQ(header, block);
1038       bool end_found;
1039       while (true) {
1040         if (block == nullptr || block == loop->end) {
1041           end_found = (loop->end == block);
1042           break;
1043         }
1044         // The list should be in the same order as the final result.
1045         DCHECK(block->rpo_number() == links + header->rpo_number());
1046         links++;
1047         block = block->rpo_next();
1048         DCHECK_LT(links, static_cast<int>(2 * order->size()));  // cycle?
1049       }
1050       DCHECK(links > 0);
1051       DCHECK(links == end->rpo_number() - header->rpo_number());
1052       DCHECK(end_found);
1053 
1054       // Check loop depth of the header.
1055       int loop_depth = 0;
1056       for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
1057         loop_depth++;
1058       }
1059       DCHECK_EQ(loop_depth, header->loop_depth());
1060 
1061       // Check the contiguousness of loops.
1062       int count = 0;
1063       for (int j = 0; j < static_cast<int>(order->size()); j++) {
1064         BasicBlock* block = order->at(j);
1065         DCHECK(block->rpo_number() == j);
1066         if (j < header->rpo_number() || j >= end->rpo_number()) {
1067           DCHECK(!header->LoopContains(block));
1068         } else {
1069           DCHECK(header->LoopContains(block));
1070           DCHECK_GE(block->loop_depth(), loop_depth);
1071           count++;
1072         }
1073       }
1074       DCHECK(links == count);
1075     }
1076   }
1077 #endif  // DEBUG
1078 
1079   Zone* zone_;
1080   Schedule* schedule_;
1081   BasicBlock* order_;
1082   BasicBlock* beyond_end_;
1083   ZoneVector<LoopInfo> loops_;
1084   ZoneVector<Backedge> backedges_;
1085   ZoneVector<SpecialRPOStackFrame> stack_;
1086   size_t previous_block_count_;
1087   ZoneVector<BasicBlock*> const empty_;
1088 };
1089 
1090 
1091 BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
1092   SpecialRPONumberer numberer(zone, schedule);
1093   numberer.ComputeSpecialRPO();
1094   numberer.SerializeRPOIntoSchedule();
1095   numberer.PrintAndVerifySpecialRPO();
1096   return schedule->rpo_order();
1097 }
1098 
1099 
1100 void Scheduler::ComputeSpecialRPONumbering() {
1101   TRACE("--- COMPUTING SPECIAL RPO ----------------------------------\n");
1102 
1103   // Compute the special reverse-post-order for basic blocks.
1104   special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
1105   special_rpo_->ComputeSpecialRPO();
1106 }
1107 
1108 
1109 void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
1110   for (/*nop*/; block != nullptr; block = block->rpo_next()) {
1111     auto pred = block->predecessors().begin();
1112     auto end = block->predecessors().end();
1113     DCHECK(pred != end);  // All blocks except start have predecessors.
1114     BasicBlock* dominator = *pred;
1115     bool deferred = dominator->deferred();
1116     // For multiple predecessors, walk up the dominator tree until a common
1117     // dominator is found. Visitation order guarantees that all predecessors
1118     // except for backwards edges have been visited.
1119     for (++pred; pred != end; ++pred) {
1120       // Don't examine backwards edges.
1121       if ((*pred)->dominator_depth() < 0) continue;
1122       dominator = BasicBlock::GetCommonDominator(dominator, *pred);
1123       deferred = deferred & (*pred)->deferred();
1124     }
1125     block->set_dominator(dominator);
1126     block->set_dominator_depth(dominator->dominator_depth() + 1);
1127     block->set_deferred(deferred | block->deferred());
1128     TRACE("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(),
1129           dominator->id().ToInt(), block->dominator_depth());
1130   }
1131 }
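// Illustrative sketch: in a diamond B1 -> {B2, B3} -> B4, block B4 has the
// two predecessors B2 and B3. The loop above starts with dominator = B2,
// GetCommonDominator(B2, B3) walks up to B1, so B1 becomes B4's immediate
// dominator and B4 receives dominator depth depth(B1) + 1.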
1132 
1133 
1134 void Scheduler::GenerateImmediateDominatorTree() {
1135   TRACE("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
1136 
1137   // Seed start block to be the first dominator.
1138   schedule_->start()->set_dominator_depth(0);
1139 
1140   // Build the block dominator tree resulting from the above seed.
1141   PropagateImmediateDominators(schedule_->start()->rpo_next());
1142 }
1143 
1144 
1145 // -----------------------------------------------------------------------------
1146 // Phase 3: Prepare use counts for nodes.
1147 
1148 
1149 class PrepareUsesVisitor {
1150  public:
1151   explicit PrepareUsesVisitor(Scheduler* scheduler)
1152       : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
1153 
1154   void Pre(Node* node) {
1155     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
1156       // Fixed nodes are always roots for schedule late.
1157       scheduler_->schedule_root_nodes_.push_back(node);
1158       if (!schedule_->IsScheduled(node)) {
1159         // Make sure root nodes are scheduled in their respective blocks.
1160         TRACE("Scheduling fixed position node #%d:%s\n", node->id(),
1161               node->op()->mnemonic());
1162         IrOpcode::Value opcode = node->opcode();
1163         BasicBlock* block =
1164             opcode == IrOpcode::kParameter
1165                 ? schedule_->start()
1166                 : schedule_->block(NodeProperties::GetControlInput(node));
1167         DCHECK_NOT_NULL(block);
1168         schedule_->AddNode(block, node);
1169       }
1170     }
1171   }
1172 
1173   void PostEdge(Node* from, int index, Node* to) {
1174     // If the edge is from an unscheduled node, then tally it in the use count
1175     // for all of its inputs. The same criterion will be used in ScheduleLate
1176     // for decrementing use counts.
1177     if (!schedule_->IsScheduled(from)) {
1178       DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
1179       scheduler_->IncrementUnscheduledUseCount(to, index, from);
1180     }
1181   }
1182 
1183  private:
1184   Scheduler* scheduler_;
1185   Schedule* schedule_;
1186 };
1187 
1188 
1189 void Scheduler::PrepareUses() {
1190   TRACE("--- PREPARE USES -------------------------------------------\n");
1191 
1192   // Count the uses of every node, which is used to ensure that all of a
1193   // node's uses are scheduled before the node itself.
1194   PrepareUsesVisitor prepare_uses(this);
1195 
1196   // TODO(turbofan): simplify the careful pre/post ordering here.
1197   BoolVector visited(graph_->NodeCount(), false, zone_);
1198   ZoneStack<Node::InputEdges::iterator> stack(zone_);
1199   Node* node = graph_->end();
1200   prepare_uses.Pre(node);
1201   visited[node->id()] = true;
1202   stack.push(node->input_edges().begin());
1203   while (!stack.empty()) {
1204     Edge edge = *stack.top();
1205     Node* node = edge.to();
1206     if (visited[node->id()]) {
1207       prepare_uses.PostEdge(edge.from(), edge.index(), edge.to());
1208       if (++stack.top() == edge.from()->input_edges().end()) stack.pop();
1209     } else {
1210       prepare_uses.Pre(node);
1211       visited[node->id()] = true;
1212       if (node->InputCount() > 0) stack.push(node->input_edges().begin());
1213     }
1214   }
1215 }
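// The loop above is a non-recursive DFS over input edges: Pre() fires the
// first time a node is reached, and PostEdge() fires for an edge once its
// target has already been marked visited, mirroring a pre/post visitation
// without recursion.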
1216 
1217 
1218 // -----------------------------------------------------------------------------
1219 // Phase 4: Schedule nodes early.
1220 
1221 
1222 class ScheduleEarlyNodeVisitor {
1223  public:
1224   ScheduleEarlyNodeVisitor(Zone* zone, Scheduler* scheduler)
1225       : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}
1226 
1227   // Run the schedule early algorithm on a set of fixed root nodes.
1228   void Run(NodeVector* roots) {
1229     for (Node* const root : *roots) {
1230       queue_.push(root);
1231       while (!queue_.empty()) {
1232         VisitNode(queue_.front());
1233         queue_.pop();
1234       }
1235     }
1236   }
1237 
1238  private:
1239   // Visits one node from the queue and propagates its current schedule early
1240   // position to all uses. This in turn might push more nodes onto the queue.
1241   void VisitNode(Node* node) {
1242     Scheduler::SchedulerData* data = scheduler_->GetData(node);
1243 
1244     // Fixed nodes already know their schedule early position.
1245     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
1246       data->minimum_block_ = schedule_->block(node);
1247       TRACE("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
1248             node->id(), node->op()->mnemonic(),
1249             data->minimum_block_->id().ToInt(),
1250             data->minimum_block_->dominator_depth());
1251     }
1252 
1253     // No need to propagate unconstrained schedule early positions.
1254     if (data->minimum_block_ == schedule_->start()) return;
1255 
1256     // Propagate schedule early position.
1257     DCHECK_NOT_NULL(data->minimum_block_);
1258     for (auto use : node->uses()) {
1259       PropagateMinimumPositionToNode(data->minimum_block_, use);
1260     }
1261   }
1262 
1263   // Propagates {block} as another minimum position into the given {node}. This
1264   // has the net effect of computing the minimum dominator block of {node} that
1265   // still post-dominates all inputs to {node} when the queue is processed.
1266   void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
1267     Scheduler::SchedulerData* data = scheduler_->GetData(node);
1268 
1269     // No need to propagate to fixed node, it's guaranteed to be a root.
1270     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;
1271 
1272     // Coupled nodes influence schedule early position of their control.
1273     if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
1274       Node* control = NodeProperties::GetControlInput(node);
1275       PropagateMinimumPositionToNode(block, control);
1276     }
1277 
1278     // Propagate new position if it is deeper down the dominator tree than the
1279     // current. Note that all inputs need to have minimum block position inside
1280     // the dominator chain of {node}'s minimum block position.
1281     DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
1282     if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
1283       data->minimum_block_ = block;
1284       queue_.push(node);
1285       TRACE("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
1286             node->id(), node->op()->mnemonic(),
1287             data->minimum_block_->id().ToInt(),
1288             data->minimum_block_->dominator_depth());
1289     }
1290   }
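  // Illustrative sketch: if an Add node takes as input a Phi fixed in block
  // B5, then visiting the Phi propagates B5 to the Add here. When B5 is
  // deeper in the dominator tree than the Add's current minimum block, the
  // minimum becomes B5 and the Add is re-queued so that the tightened bound
  // also reaches the Add's own uses.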
1291 
1292 #if DEBUG
1293   bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
1294     BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
1295     return dominator == b1 || dominator == b2;
1296   }
1297 #endif
1298 
1299   Scheduler* scheduler_;
1300   Schedule* schedule_;
1301   ZoneQueue<Node*> queue_;
1302 };
1303 
1304 
1305 void Scheduler::ScheduleEarly() {
1306   TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
1307   if (FLAG_trace_turbo_scheduler) {
1308     TRACE("roots: ");
1309     for (Node* node : schedule_root_nodes_) {
1310       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1311     }
1312     TRACE("\n");
1313   }
1314 
1315   // Compute the minimum block for each node thereby determining the earliest
1316   // position each node could be placed within a valid schedule.
1317   ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
1318   schedule_early_visitor.Run(&schedule_root_nodes_);
1319 }
1320 
1321 
1322 // -----------------------------------------------------------------------------
1323 // Phase 5: Schedule nodes late.
1324 
1325 
1326 class ScheduleLateNodeVisitor {
1327  public:
1328   ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
1329       : scheduler_(scheduler),
1330         schedule_(scheduler_->schedule_),
1331         marked_(scheduler->zone_),
1332         marking_queue_(scheduler->zone_) {}
1333 
1334   // Run the schedule late algorithm on a set of fixed root nodes.
1335   void Run(NodeVector* roots) {
1336     for (Node* const root : *roots) {
1337       ProcessQueue(root);
1338     }
1339   }
1340 
1341  private:
1342   void ProcessQueue(Node* root) {
1343     ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
1344     for (Node* node : root->inputs()) {
1345       // Don't schedule coupled nodes on their own.
1346       if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
1347         node = NodeProperties::GetControlInput(node);
1348       }
1349 
1350       // Test schedulability condition by looking at unscheduled use count.
1351       if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
1352 
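      // Drain the queue; visiting a node may cause the scheduler to push
      // further nodes that have become schedulable onto the same queue.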
      queue->push(node);
      do {
        Node* const node = queue->front();
        queue->pop();
        VisitNode(node);
      } while (!queue->empty());
    }
  }

  // Visits one node from the queue of schedulable nodes and determines its
  // schedule late position. Also hoists nodes out of loops to find a more
  // optimal scheduling position.
  void VisitNode(Node* node) {
    DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);

    // Don't schedule nodes that are already scheduled.
    if (schedule_->IsScheduled(node)) return;
    DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));

    // Determine the dominating block for all of the uses of this node. It is
    // the latest block that this node can be scheduled in.
    TRACE("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
    BasicBlock* block = GetCommonDominatorOfUses(node);
    DCHECK_NOT_NULL(block);

    // The schedule early block dominates the schedule late block.
    BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
    DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
    TRACE(
        "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
        node->id(), node->op()->mnemonic(), block->id().ToInt(),
        block->loop_depth(), min_block->id().ToInt());

    // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
    // into enclosing loop pre-headers until they would precede their schedule
    // early position.
    BasicBlock* hoist_block = GetHoistBlock(block);
    if (hoist_block &&
        hoist_block->dominator_depth() >= min_block->dominator_depth()) {
      do {
        TRACE("  hoisting #%d:%s to block id:%d\n", node->id(),
              node->op()->mnemonic(), hoist_block->id().ToInt());
        DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
        block = hoist_block;
        hoist_block = GetHoistBlock(hoist_block);
      } while (hoist_block &&
               hoist_block->dominator_depth() >= min_block->dominator_depth());
    } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
      // Split the {node} if beneficial and return the new {block} for it.
      block = SplitNode(block, node);
    }

    // Schedule the node or a floating control structure.
    if (IrOpcode::IsMergeOpcode(node->opcode())) {
      ScheduleFloatingControl(block, node);
    } else if (node->opcode() == IrOpcode::kFinishRegion) {
      ScheduleRegion(block, node);
    } else {
      ScheduleNode(block, node);
    }
  }

  // Mark {block} and push its non-marked predecessors onto the marking queue.
  void MarkBlock(BasicBlock* block) {
    DCHECK_LT(block->id().ToSize(), marked_.size());
    marked_[block->id().ToSize()] = true;
    for (BasicBlock* pred_block : block->predecessors()) {
      DCHECK_LT(pred_block->id().ToSize(), marked_.size());
      if (marked_[pred_block->id().ToSize()]) continue;
      marking_queue_.push_back(pred_block);
    }
  }

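  // Tries to push {node}, or copies of it, out of the common dominator
  // {block} of its uses and closer to those uses. Returns the block in which
  // the original {node} should be scheduled.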
  BasicBlock* SplitNode(BasicBlock* block, Node* node) {
    // For now, we limit splitting to pure nodes.
    if (!node->op()->HasProperty(Operator::kPure)) return block;
    // TODO(titzer): fix the special case of splitting of projections.
    if (node->opcode() == IrOpcode::kProjection) return block;

    // The {block} is the common dominator of all uses of {node}, so we cannot
    // split anything unless the {block} has at least two successors.
    DCHECK_EQ(block, GetCommonDominatorOfUses(node));
    if (block->SuccessorCount() < 2) return block;

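    // Note that the schedule may have gained basic blocks since the last
    // invocation (e.g. when floating control was fused in), so the marking
    // vector is cleared and then re-sized to the current block count.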
    // Clear marking bits.
    DCHECK(marking_queue_.empty());
    std::fill(marked_.begin(), marked_.end(), false);
    marked_.resize(schedule_->BasicBlockCount() + 1, false);

    // Check if the {node} has uses in {block}, and mark the blocks of all
    // other uses as a seed for the marking closure below.
    for (Edge edge : node->use_edges()) {
      BasicBlock* use_block = GetBlockForUse(edge);
      if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue;
      if (use_block == block) {
        TRACE("  not splitting #%d:%s, it is used in id:%d\n", node->id(),
              node->op()->mnemonic(), block->id().ToInt());
        marking_queue_.clear();
        return block;
      }
      MarkBlock(use_block);
    }

    // Compute transitive marking closure; a block is marked if all its
    // successors are marked.
    do {
      BasicBlock* top_block = marking_queue_.front();
      marking_queue_.pop_front();
      if (marked_[top_block->id().ToSize()]) continue;
      bool marked = true;
      for (BasicBlock* successor : top_block->successors()) {
        if (!marked_[successor->id().ToSize()]) {
          marked = false;
          break;
        }
      }
      if (marked) MarkBlock(top_block);
    } while (!marking_queue_.empty());

    // If the (common dominator) {block} is marked, we know that all paths from
    // {block} to the end contain at least one use of {node}, and hence there's
    // no point in splitting the {node} in this case.
    if (marked_[block->id().ToSize()]) {
      TRACE("  not splitting #%d:%s, its common dominator id:%d is perfect\n",
            node->id(), node->op()->mnemonic(), block->id().ToInt());
      return block;
    }

    // Split {node} for its uses according to the previously computed marking
    // closure. Every marking partition has a unique dominator, which gets a
    // copy of the {node}, with the exception of the first partition, which
    // gets the {node} itself.
    ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
    for (Edge edge : node->use_edges()) {
      BasicBlock* use_block = GetBlockForUse(edge);
      if (use_block == nullptr) continue;
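      // Walk up the dominator chain while the dominator is still marked; the
      // first block with an unmarked dominator is the representative of this
      // use's partition.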
      while (marked_[use_block->dominator()->id().ToSize()]) {
        use_block = use_block->dominator();
      }
      auto& use_node = dominators[use_block];
      if (use_node == nullptr) {
        if (dominators.size() == 1u) {
          // Place the {node} at {use_block}.
          block = use_block;
          use_node = node;
          TRACE("  pushing #%d:%s down to id:%d\n", node->id(),
                node->op()->mnemonic(), block->id().ToInt());
        } else {
          // Place a copy of {node} at {use_block}.
          use_node = CloneNode(node);
          TRACE("  cloning #%d:%s for id:%d\n", use_node->id(),
                use_node->op()->mnemonic(), use_block->id().ToInt());
          scheduler_->schedule_queue_.push(use_node);
        }
      }
      edge.UpdateTo(use_node);
    }
    return block;
  }

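  // Returns the block to which nodes in {block} could be hoisted: the
  // dominator of the enclosing loop header, or nullptr if {block} is not
  // inside a loop or hoisting out of the loop is not safe.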
  BasicBlock* GetHoistBlock(BasicBlock* block) {
    if (block->IsLoopHeader()) return block->dominator();
    // We have to make sure that the {block} dominates all of the loop's
    // outgoing blocks. If it doesn't, then there is a path out of the loop
    // that does not execute this {block}, so we can't hoist operations from
    // this {block} out of the loop, as that would introduce additional
    // computations.
    if (BasicBlock* header_block = block->loop_header()) {
      for (BasicBlock* outgoing_block :
           scheduler_->special_rpo_->GetOutgoingBlocks(header_block)) {
        if (BasicBlock::GetCommonDominator(block, outgoing_block) != block) {
          return nullptr;
        }
      }
      return header_block->dominator();
    }
    return nullptr;
  }

  BasicBlock* GetCommonDominatorOfUses(Node* node) {
    BasicBlock* block = nullptr;
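    // Fold the common dominator over all use positions; uses for which no
    // block can be determined (GetBlockForUse returns nullptr) are skipped.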
    for (Edge edge : node->use_edges()) {
      BasicBlock* use_block = GetBlockForUse(edge);
      block = block == nullptr
                  ? use_block
                  : use_block == nullptr
                        ? block
                        : BasicBlock::GetCommonDominator(block, use_block);
    }
    return block;
  }

  BasicBlock* FindPredecessorBlock(Node* node) {
    return scheduler_->control_flow_builder_->FindPredecessorBlock(node);
  }

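  // Determines the block from which the given use {edge} constrains the
  // schedule-late position of the used node, or nullptr if the use imposes
  // no constraint (e.g. because the using node has no block yet).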
  BasicBlock* GetBlockForUse(Edge edge) {
    // TODO(titzer): ignore uses from dead nodes (not visited in PrepareUses()).
    // Dead uses only occur if the graph is not trimmed before scheduling.
    Node* use = edge.from();
    if (IrOpcode::IsPhiOpcode(use->opcode())) {
      // If the use is from a coupled (i.e. floating) phi, compute the common
      // dominator of its uses. This will not recurse more than one level.
      if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
        TRACE("  inspecting uses of coupled #%d:%s\n", use->id(),
              use->op()->mnemonic());
        // TODO(titzer): reenable once above TODO is addressed.
        //        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
        return GetCommonDominatorOfUses(use);
      }
      // If the use is from a fixed (i.e. non-floating) phi, we use the
      // predecessor block of the corresponding control input to the merge.
      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
        TRACE("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
              use->op()->mnemonic());
        Node* merge = NodeProperties::GetControlInput(use, 0);
        DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
        Node* input = NodeProperties::GetControlInput(merge, edge.index());
        return FindPredecessorBlock(input);
      }
    } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
      // If the use is from a fixed (i.e. non-floating) merge, we use the
      // predecessor block of the current input to the merge.
      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
        TRACE("  input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
              use->op()->mnemonic());
        return FindPredecessorBlock(edge.to());
      }
    }
    BasicBlock* result = schedule_->block(use);
    if (result == nullptr) return nullptr;
    TRACE("  must dominate use #%d:%s in id:%d\n", use->id(),
          use->op()->mnemonic(), result->id().ToInt());
    return result;
  }

  void ScheduleFloatingControl(BasicBlock* block, Node* node) {
    scheduler_->FuseFloatingControl(block, node);
  }

  void ScheduleRegion(BasicBlock* block, Node* region_end) {
    // We only allow regions of instructions connected into a linear
    // effect chain. The only value allowed to be produced by a node
    // in the chain is the value consumed by the FinishRegion node.

    // We schedule back to front; we first schedule FinishRegion.
    CHECK_EQ(IrOpcode::kFinishRegion, region_end->opcode());
    ScheduleNode(block, region_end);

    // Schedule the chain.
    Node* node = NodeProperties::GetEffectInput(region_end);
    while (node->opcode() != IrOpcode::kBeginRegion) {
      DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
      DCHECK_EQ(1, node->op()->EffectInputCount());
      DCHECK_EQ(1, node->op()->EffectOutputCount());
      DCHECK_EQ(0, node->op()->ControlOutputCount());
      // The value output (if there is any) must be consumed
      // by the FinishRegion node.
      DCHECK(node->op()->ValueOutputCount() == 0 ||
             node == region_end->InputAt(0));
      ScheduleNode(block, node);
      node = NodeProperties::GetEffectInput(node);
    }
    // Schedule the BeginRegion node.
    DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
    ScheduleNode(block, node);
  }

  void ScheduleNode(BasicBlock* block, Node* node) {
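    // PlanNode only records the block assignment; the node is appended to its
    // block later, in SealFinalSchedule, via the scheduled_nodes_ lists.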
    schedule_->PlanNode(block, node);
    scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);
    scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
  }

  Node* CloneNode(Node* node) {
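    // The copy introduces one additional use of every input, so account for
    // those uses before the copy itself is scheduled.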
    int const input_count = node->InputCount();
    for (int index = 0; index < input_count; ++index) {
      Node* const input = node->InputAt(index);
      scheduler_->IncrementUnscheduledUseCount(input, index, node);
    }
    Node* const copy = scheduler_->graph_->CloneNode(node);
    TRACE(("clone #%d:%s -> #%d\n"), node->id(), node->op()->mnemonic(),
          copy->id());
    scheduler_->node_data_.resize(copy->id() + 1,
                                  scheduler_->DefaultSchedulerData());
    scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
    return copy;
  }

  Scheduler* scheduler_;
  Schedule* schedule_;
  BoolVector marked_;
  ZoneDeque<BasicBlock*> marking_queue_;
};


void Scheduler::ScheduleLate() {
  TRACE("--- SCHEDULE LATE ------------------------------------------\n");
  if (FLAG_trace_turbo_scheduler) {
    TRACE("roots: ");
    for (Node* node : schedule_root_nodes_) {
      TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    TRACE("\n");
  }

  // Schedule: place nodes in the common dominator block of all their uses.
  ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
  schedule_late_visitor.Run(&schedule_root_nodes_);
}


// -----------------------------------------------------------------------------
// Phase 6: Seal the final schedule.


void Scheduler::SealFinalSchedule() {
  TRACE("--- SEAL FINAL SCHEDULE ------------------------------------\n");

  // Serialize the assembly order and reverse-post-order numbering.
  special_rpo_->SerializeRPOIntoSchedule();
  special_rpo_->PrintAndVerifySpecialRPO();

  // Add collected nodes for basic blocks to their blocks in the right order.
  int block_num = 0;
  for (NodeVector& nodes : scheduled_nodes_) {
    BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
    BasicBlock* block = schedule_->GetBlockById(id);
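    // Planned nodes were collected back to front during scheduling, so add
    // them in reverse to obtain the intended order within the block.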
    for (Node* node : base::Reversed(nodes)) {
      schedule_->AddNode(block, node);
    }
  }
}


// -----------------------------------------------------------------------------


void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
  TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
  if (FLAG_trace_turbo_scheduler) {
    OFStream os(stdout);
    os << "Schedule before control flow fusion:\n" << *schedule_;
  }

  // Iterate on phase 1: Build control-flow graph.
  control_flow_builder_->Run(block, node);

  // Iterate on phase 2: Compute special RPO and dominator tree.
  special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
  // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
  for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
    b->set_dominator_depth(-1);
    b->set_dominator(nullptr);
  }
  PropagateImmediateDominators(block->rpo_next());

  // Iterate on phase 4: Schedule nodes early.
  // TODO(mstarzinger): The following loop gathering the propagation roots is a
  // temporary solution and should be merged into the rest of the scheduler as
  // soon as the approach has settled for all floating loops.
  NodeVector propagation_roots(control_flow_builder_->control_);
  for (Node* node : control_flow_builder_->control_) {
    for (Node* use : node->uses()) {
      if (NodeProperties::IsPhi(use)) propagation_roots.push_back(use);
    }
  }
  if (FLAG_trace_turbo_scheduler) {
    TRACE("propagation roots: ");
    for (Node* node : propagation_roots) {
      TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    TRACE("\n");
  }
  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
  schedule_early_visitor.Run(&propagation_roots);

  // Move previously planned nodes.
  // TODO(mstarzinger): Improve that by supporting bulk moves.
  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
  MovePlannedNodes(block, schedule_->block(node));

  if (FLAG_trace_turbo_scheduler) {
    OFStream os(stdout);
    os << "Schedule after control flow fusion:\n" << *schedule_;
  }
}


void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
  TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
        to->id().ToInt());
  NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]);
  for (Node* const node : *nodes) {
    schedule_->SetBlockForNode(to, node);
    scheduled_nodes_[to->id().ToSize()].push_back(node);
  }
  nodes->clear();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8