1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/scheduler.h"
6 
7 #include <iomanip>
8 
9 #include "src/base/iterator.h"
10 #include "src/builtins/profile-data-reader.h"
11 #include "src/codegen/tick-counter.h"
12 #include "src/compiler/common-operator.h"
13 #include "src/compiler/control-equivalence.h"
14 #include "src/compiler/graph.h"
15 #include "src/compiler/node-marker.h"
16 #include "src/compiler/node-properties.h"
17 #include "src/compiler/node.h"
18 #include "src/utils/bit-vector.h"
19 #include "src/zone/zone-containers.h"
20 
21 namespace v8 {
22 namespace internal {
23 namespace compiler {
24 
25 #define TRACE(...)                                       \
26   do {                                                   \
27     if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
28   } while (false)
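// Usage note (illustrative, restating calls found later in this file): the
// macro prints only when the trace_turbo_scheduler flag is enabled, e.g.
//   TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
//         node->op()->mnemonic(), block->id().ToInt());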
29 
30 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
31                      size_t node_count_hint, TickCounter* tick_counter,
32                      const ProfileDataFromFile* profile_data)
33     : zone_(zone),
34       graph_(graph),
35       schedule_(schedule),
36       flags_(flags),
37       scheduled_nodes_(zone),
38       schedule_root_nodes_(zone),
39       schedule_queue_(zone),
40       node_data_(zone),
41       tick_counter_(tick_counter),
42       profile_data_(profile_data) {
43   node_data_.reserve(node_count_hint);
44   node_data_.resize(graph->NodeCount(), DefaultSchedulerData());
45 }
46 
47 Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags,
48                                      TickCounter* tick_counter,
49                                      const ProfileDataFromFile* profile_data) {
50   Zone* schedule_zone =
51       (flags & Scheduler::kTempSchedule) ? zone : graph->zone();
52 
53   // Reserve 10% more space for nodes if node splitting is enabled to try to
54   // avoid resizing the vector since that would triple its zone memory usage.
55   float node_hint_multiplier = (flags & Scheduler::kSplitNodes) ? 1.1 : 1;
56   size_t node_count_hint = node_hint_multiplier * graph->NodeCount();
57 
58   Schedule* schedule =
59       schedule_zone->New<Schedule>(schedule_zone, node_count_hint);
60   Scheduler scheduler(zone, graph, schedule, flags, node_count_hint,
61                       tick_counter, profile_data);
62 
63   scheduler.BuildCFG();
64   scheduler.ComputeSpecialRPONumbering();
65   scheduler.GenerateDominatorTree();
66 
67   scheduler.PrepareUses();
68   scheduler.ScheduleEarly();
69   scheduler.ScheduleLate();
70 
71   scheduler.SealFinalSchedule();
72 
73   return schedule;
74 }
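// Usage sketch (illustrative only; the surrounding pipeline objects such as
// {temp_zone} are assumptions, not part of this file): a compiler phase would
// invoke the scheduler roughly like this:
//
//   Schedule* schedule = Scheduler::ComputeSchedule(
//       temp_zone, graph, Scheduler::kSplitNodes, tick_counter, profile_data);
//
// On return, every live node (one reachable from end) has been assigned to a
// basic block, and the blocks carry a special RPO numbering.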
75 
76 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
77   SchedulerData def = {schedule_->start(), 0, kUnknown};
78   return def;
79 }
80 
81 
82 Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
83   return &node_data_[node->id()];
84 }
85 
86 Scheduler::Placement Scheduler::InitializePlacement(Node* node) {
87   SchedulerData* data = GetData(node);
88   if (data->placement_ == kFixed) {
89     // Nothing to do for control nodes that have been already fixed in
90     // the schedule.
91     return data->placement_;
92   }
93   DCHECK_EQ(kUnknown, data->placement_);
94   switch (node->opcode()) {
95     case IrOpcode::kParameter:
96     case IrOpcode::kOsrValue:
97       // Parameters and OSR values are always fixed to the start block.
98       data->placement_ = kFixed;
99       break;
100     case IrOpcode::kPhi:
101     case IrOpcode::kEffectPhi: {
102       // Phis and effect phis are fixed if their control inputs are fixed;
103       // otherwise they are coupled to a floating control node.
104       Placement p = GetPlacement(NodeProperties::GetControlInput(node));
105       data->placement_ = (p == kFixed ? kFixed : kCoupled);
106       break;
107     }
108 #define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
109       CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
110 #undef DEFINE_CONTROL_CASE
111       {
112         // Control nodes that were not control-reachable from end may float.
113         data->placement_ = kSchedulable;
114         break;
115     }
116     default:
117       data->placement_ = kSchedulable;
118       break;
119   }
120   return data->placement_;
121 }
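// Example of the classification above (illustrative): a Phi whose Merge was
// already fixed into a block by the CFG builder is itself kFixed, while a Phi
// hanging off a floating Merge (one not control-reachable from end) becomes
// kCoupled and is only placed once that Merge is fused into the schedule.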
122 
123 Scheduler::Placement Scheduler::GetPlacement(Node* node) {
124   return GetData(node)->placement_;
125 }
126 
127 bool Scheduler::IsLive(Node* node) { return GetPlacement(node) != kUnknown; }
128 
129 void Scheduler::UpdatePlacement(Node* node, Placement placement) {
130   SchedulerData* data = GetData(node);
131   if (data->placement_ == kUnknown) {
132     // We only update control nodes from {kUnknown} to {kFixed}.  Ideally, we
133     // should check that {node} is a control node (including exceptional calls),
134     // but that is expensive.
135     DCHECK_EQ(Scheduler::kFixed, placement);
136     data->placement_ = placement;
137     return;
138   }
139 
140   switch (node->opcode()) {
141     case IrOpcode::kParameter:
142       // Parameters are fixed once and for all.
143       UNREACHABLE();
144     case IrOpcode::kPhi:
145     case IrOpcode::kEffectPhi: {
146       // Phis and effect phis are coupled to their respective blocks.
147       DCHECK_EQ(Scheduler::kCoupled, data->placement_);
148       DCHECK_EQ(Scheduler::kFixed, placement);
149       Node* control = NodeProperties::GetControlInput(node);
150       BasicBlock* block = schedule_->block(control);
151       schedule_->AddNode(block, node);
152       break;
153     }
154 #define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
155       CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
156 #undef DEFINE_CONTROL_CASE
157       {
158         // Control nodes force coupled uses to be placed.
159         for (auto use : node->uses()) {
160           if (GetPlacement(use) == Scheduler::kCoupled) {
161             DCHECK_EQ(node, NodeProperties::GetControlInput(use));
162             UpdatePlacement(use, placement);
163           }
164       }
165       break;
166     }
167     default:
168       DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
169       DCHECK_EQ(Scheduler::kScheduled, placement);
170       break;
171   }
172   // Reduce the use count of the node's inputs to potentially make them
173   // schedulable. If all the uses of a node have been scheduled, then the node
174   // itself can be scheduled.
175   for (Edge const edge : node->input_edges()) {
176     DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
177   }
178   data->placement_ = placement;
179 }
180 
181 
182 bool Scheduler::IsCoupledControlEdge(Node* node, int index) {
183   return GetPlacement(node) == kCoupled &&
184          NodeProperties::FirstControlIndex(node) == index;
185 }
186 
187 
188 void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
189                                              Node* from) {
190   // Make sure that control edges from coupled nodes are not counted.
191   if (IsCoupledControlEdge(from, index)) return;
192 
193   // Tracking use counts for fixed nodes is useless.
194   if (GetPlacement(node) == kFixed) return;
195 
196   // Use count for coupled nodes is summed up on their control.
197   if (GetPlacement(node) == kCoupled) {
198     Node* control = NodeProperties::GetControlInput(node);
199     return IncrementUnscheduledUseCount(control, index, from);
200   }
201 
202   ++(GetData(node)->unscheduled_count_);
203   if (FLAG_trace_turbo_scheduler) {
204     TRACE("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
205           node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
206           GetData(node)->unscheduled_count_);
207   }
208 }
209 
210 
211 void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
212                                              Node* from) {
213   // Make sure that control edges from coupled nodes are not counted.
214   if (IsCoupledControlEdge(from, index)) return;
215 
216   // Tracking use counts for fixed nodes is useless.
217   if (GetPlacement(node) == kFixed) return;
218 
219   // Use count for coupled nodes is summed up on their control.
220   if (GetPlacement(node) == kCoupled) {
221     Node* control = NodeProperties::GetControlInput(node);
222     return DecrementUnscheduledUseCount(control, index, from);
223   }
224 
225   DCHECK_LT(0, GetData(node)->unscheduled_count_);
226   --(GetData(node)->unscheduled_count_);
227   if (FLAG_trace_turbo_scheduler) {
228     TRACE("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
229           node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
230           GetData(node)->unscheduled_count_);
231   }
232   if (GetData(node)->unscheduled_count_ == 0) {
233     TRACE("    newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
234     schedule_queue_.push(node);
235   }
236 }
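// Worked example (illustrative): a node with three unscheduled uses starts
// with unscheduled_count_ == 3; as each use is placed the count drops
// 3 -> 2 -> 1 -> 0, and on the final decrement the node is pushed onto
// schedule_queue_, making it eligible for scheduling itself.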
237 
238 
239 // -----------------------------------------------------------------------------
240 // Phase 1: Build control-flow graph.
241 
242 
243 // Internal class to build a control flow graph (i.e. the basic blocks and edges
244 // between them within a Schedule) from the node graph. Visits control edges of
245 // the graph backwards from an end node in order to find the connected control
246 // subgraph, needed for scheduling.
247 class CFGBuilder : public ZoneObject {
248  public:
249   CFGBuilder(Zone* zone, Scheduler* scheduler)
250       : zone_(zone),
251         scheduler_(scheduler),
252         schedule_(scheduler->schedule_),
253         queued_(scheduler->graph_, 2),
254         queue_(zone),
255         control_(zone),
256         component_entry_(nullptr),
257         component_start_(nullptr),
258         component_end_(nullptr) {}
259 
260   // Run the control flow graph construction algorithm by walking the graph
261   // backwards from end through control edges, building and connecting the
262   // basic blocks for control nodes.
263   void Run() {
264     ResetDataStructures();
265     Queue(scheduler_->graph_->end());
266 
267     while (!queue_.empty()) {  // Breadth-first backwards traversal.
268       scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
269       Node* node = queue_.front();
270       queue_.pop();
271       int max = NodeProperties::PastControlIndex(node);
272       for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
273         Queue(node->InputAt(i));
274       }
275     }
276 
277     for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
278       ConnectBlocks(*i);  // Connect block to its predecessor/successors.
279     }
280   }
281 
282   // Run the control flow graph construction for a minimal control-connected
283   // component ending in {exit} and merge that component into an existing
284   // control flow graph at the bottom of {block}.
285   void Run(BasicBlock* block, Node* exit) {
286     ResetDataStructures();
287     Queue(exit);
288 
289     component_entry_ = nullptr;
290     component_start_ = block;
291     component_end_ = schedule_->block(exit);
292     scheduler_->equivalence_->Run(exit);
293     while (!queue_.empty()) {  // Breadth-first backwards traversal.
294       scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
295       Node* node = queue_.front();
296       queue_.pop();
297 
298       // Use control dependence equivalence to find a canonical single-entry
299       // single-exit region that makes up a minimal component to be scheduled.
300       if (IsSingleEntrySingleExitRegion(node, exit)) {
301         TRACE("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
302         DCHECK(!component_entry_);
303         component_entry_ = node;
304         continue;
305       }
306 
307       int max = NodeProperties::PastControlIndex(node);
308       for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
309         Queue(node->InputAt(i));
310       }
311     }
312     DCHECK(component_entry_);
313 
314     for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
315       ConnectBlocks(*i);  // Connect block to its predecessor/successors.
316     }
317   }
318 
319  private:
320   friend class ScheduleLateNodeVisitor;
321   friend class Scheduler;
322 
323   void FixNode(BasicBlock* block, Node* node) {
324     schedule_->AddNode(block, node);
325     scheduler_->UpdatePlacement(node, Scheduler::kFixed);
326   }
327 
328   void Queue(Node* node) {
329     // Mark the connected control nodes as they are queued.
330     if (!queued_.Get(node)) {
331       BuildBlocks(node);
332       queue_.push(node);
333       queued_.Set(node, true);
334       control_.push_back(node);
335     }
336   }
337 
338   void BuildBlocks(Node* node) {
339     switch (node->opcode()) {
340       case IrOpcode::kEnd:
341         FixNode(schedule_->end(), node);
342         break;
343       case IrOpcode::kStart:
344         FixNode(schedule_->start(), node);
345         break;
346       case IrOpcode::kLoop:
347       case IrOpcode::kMerge:
348         BuildBlockForNode(node);
349         break;
350       case IrOpcode::kTerminate: {
351         // Put Terminate in the loop to which it refers.
352         Node* loop = NodeProperties::GetControlInput(node);
353         BasicBlock* block = BuildBlockForNode(loop);
354         FixNode(block, node);
355         break;
356       }
357       case IrOpcode::kBranch:
358       case IrOpcode::kSwitch:
359         BuildBlocksForSuccessors(node);
360         break;
361 #define BUILD_BLOCK_JS_CASE(Name, ...) case IrOpcode::k##Name:
362         JS_OP_LIST(BUILD_BLOCK_JS_CASE)
363 // JS opcodes are just like calls => fall through.
364 #undef BUILD_BLOCK_JS_CASE
365       case IrOpcode::kCall:
366         if (NodeProperties::IsExceptionalCall(node)) {
367           BuildBlocksForSuccessors(node);
368         }
369         break;
370       default:
371         break;
372     }
373   }
374 
375   void ConnectBlocks(Node* node) {
376     switch (node->opcode()) {
377       case IrOpcode::kLoop:
378       case IrOpcode::kMerge:
379         ConnectMerge(node);
380         break;
381       case IrOpcode::kBranch:
382         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
383         ConnectBranch(node);
384         break;
385       case IrOpcode::kSwitch:
386         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
387         ConnectSwitch(node);
388         break;
389       case IrOpcode::kDeoptimize:
390         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
391         ConnectDeoptimize(node);
392         break;
393       case IrOpcode::kTailCall:
394         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
395         ConnectTailCall(node);
396         break;
397       case IrOpcode::kReturn:
398         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
399         ConnectReturn(node);
400         break;
401       case IrOpcode::kThrow:
402         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
403         ConnectThrow(node);
404         break;
405 #define CONNECT_BLOCK_JS_CASE(Name, ...) case IrOpcode::k##Name:
406         JS_OP_LIST(CONNECT_BLOCK_JS_CASE)
407 // JS opcodes are just like calls => fall through.
408 #undef CONNECT_BLOCK_JS_CASE
409       case IrOpcode::kCall:
410         if (NodeProperties::IsExceptionalCall(node)) {
411           scheduler_->UpdatePlacement(node, Scheduler::kFixed);
412           ConnectCall(node);
413         }
414         break;
415       default:
416         break;
417     }
418   }
419 
420   BasicBlock* BuildBlockForNode(Node* node) {
421     BasicBlock* block = schedule_->block(node);
422     if (block == nullptr) {
423       block = schedule_->NewBasicBlock();
424       TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
425             node->op()->mnemonic());
426       FixNode(block, node);
427     }
428     return block;
429   }
430 
431   void BuildBlocksForSuccessors(Node* node) {
432     size_t const successor_cnt = node->op()->ControlOutputCount();
433     Node** successors = zone_->NewArray<Node*>(successor_cnt);
434     NodeProperties::CollectControlProjections(node, successors, successor_cnt);
435     for (size_t index = 0; index < successor_cnt; ++index) {
436       BuildBlockForNode(successors[index]);
437     }
438   }
439 
440   void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
441                               size_t successor_cnt) {
442     Node** successors = reinterpret_cast<Node**>(successor_blocks);
443     NodeProperties::CollectControlProjections(node, successors, successor_cnt);
444     for (size_t index = 0; index < successor_cnt; ++index) {
445       successor_blocks[index] = schedule_->block(successors[index]);
446     }
447   }
448 
449   BasicBlock* FindPredecessorBlock(Node* node) {
450     BasicBlock* predecessor_block = nullptr;
451     while (true) {
452       predecessor_block = schedule_->block(node);
453       if (predecessor_block != nullptr) break;
454       node = NodeProperties::GetControlInput(node);
455     }
456     return predecessor_block;
457   }
458 
459   void ConnectCall(Node* call) {
460     BasicBlock* successor_blocks[2];
461     CollectSuccessorBlocks(call, successor_blocks, arraysize(successor_blocks));
462 
463     // Consider the exception continuation to be deferred.
464     successor_blocks[1]->set_deferred(true);
465 
466     Node* call_control = NodeProperties::GetControlInput(call);
467     BasicBlock* call_block = FindPredecessorBlock(call_control);
468     TraceConnect(call, call_block, successor_blocks[0]);
469     TraceConnect(call, call_block, successor_blocks[1]);
470     schedule_->AddCall(call_block, call, successor_blocks[0],
471                        successor_blocks[1]);
472   }
473 
474   void ConnectBranch(Node* branch) {
475     BasicBlock* successor_blocks[2];
476     CollectSuccessorBlocks(branch, successor_blocks,
477                            arraysize(successor_blocks));
478 
479     BranchHint hint_from_profile = BranchHint::kNone;
480     if (const ProfileDataFromFile* profile_data = scheduler_->profile_data()) {
481       uint32_t block_zero_count =
482           profile_data->GetCounter(successor_blocks[0]->id().ToSize());
483       uint32_t block_one_count =
484           profile_data->GetCounter(successor_blocks[1]->id().ToSize());
485       // If a branch is visited a non-trivial number of times and substantially
486       // more often than its alternative, then mark it as likely.
487       constexpr uint32_t kMinimumCount = 100000;
488       constexpr uint32_t kThresholdRatio = 4000;
489       if (block_zero_count > kMinimumCount &&
490           block_zero_count / kThresholdRatio > block_one_count) {
491         hint_from_profile = BranchHint::kTrue;
492       } else if (block_one_count > kMinimumCount &&
493                  block_one_count / kThresholdRatio > block_zero_count) {
494         hint_from_profile = BranchHint::kFalse;
495       }
496     }
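    // Worked example (illustrative numbers): with block_zero_count == 800000
    // and block_one_count == 150, the first test fires because
    // 800000 > 100000 and 800000 / 4000 == 200 > 150, so the true successor
    // is treated as likely and hint_from_profile becomes BranchHint::kTrue.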
497 
498     // Consider branch hints.
499     switch (hint_from_profile) {
500       case BranchHint::kNone:
501         switch (BranchHintOf(branch->op())) {
502           case BranchHint::kNone:
503             break;
504           case BranchHint::kTrue:
505             successor_blocks[1]->set_deferred(true);
506             break;
507           case BranchHint::kFalse:
508             successor_blocks[0]->set_deferred(true);
509             break;
510         }
511         break;
512       case BranchHint::kTrue:
513         successor_blocks[1]->set_deferred(true);
514         break;
515       case BranchHint::kFalse:
516         successor_blocks[0]->set_deferred(true);
517         break;
518     }
519 
520     if (hint_from_profile != BranchHint::kNone &&
521         BranchHintOf(branch->op()) != BranchHint::kNone &&
522         hint_from_profile != BranchHintOf(branch->op())) {
523       PrintF("Warning: profiling data overrode manual branch hint.\n");
524     }
525 
526     if (branch == component_entry_) {
527       TraceConnect(branch, component_start_, successor_blocks[0]);
528       TraceConnect(branch, component_start_, successor_blocks[1]);
529       schedule_->InsertBranch(component_start_, component_end_, branch,
530                               successor_blocks[0], successor_blocks[1]);
531     } else {
532       Node* branch_control = NodeProperties::GetControlInput(branch);
533       BasicBlock* branch_block = FindPredecessorBlock(branch_control);
534       TraceConnect(branch, branch_block, successor_blocks[0]);
535       TraceConnect(branch, branch_block, successor_blocks[1]);
536       schedule_->AddBranch(branch_block, branch, successor_blocks[0],
537                            successor_blocks[1]);
538     }
539   }
540 
541   void ConnectSwitch(Node* sw) {
542     size_t const successor_count = sw->op()->ControlOutputCount();
543     BasicBlock** successor_blocks =
544         zone_->NewArray<BasicBlock*>(successor_count);
545     CollectSuccessorBlocks(sw, successor_blocks, successor_count);
546 
547     if (sw == component_entry_) {
548       for (size_t index = 0; index < successor_count; ++index) {
549         TraceConnect(sw, component_start_, successor_blocks[index]);
550       }
551       schedule_->InsertSwitch(component_start_, component_end_, sw,
552                               successor_blocks, successor_count);
553     } else {
554       Node* switch_control = NodeProperties::GetControlInput(sw);
555       BasicBlock* switch_block = FindPredecessorBlock(switch_control);
556       for (size_t index = 0; index < successor_count; ++index) {
557         TraceConnect(sw, switch_block, successor_blocks[index]);
558       }
559       schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);
560     }
561     for (size_t index = 0; index < successor_count; ++index) {
562       if (BranchHintOf(successor_blocks[index]->front()->op()) ==
563           BranchHint::kFalse) {
564         successor_blocks[index]->set_deferred(true);
565       }
566     }
567   }
568 
569   void ConnectMerge(Node* merge) {
570     // Don't connect the special merge at the end to its predecessors.
571     if (IsFinalMerge(merge)) return;
572 
573     BasicBlock* block = schedule_->block(merge);
574     DCHECK_NOT_NULL(block);
575     // For all of the merge's control inputs, add a goto at the end to the
576     // merge's basic block.
577     for (Node* const input : merge->inputs()) {
578       BasicBlock* predecessor_block = FindPredecessorBlock(input);
579       TraceConnect(merge, predecessor_block, block);
580       schedule_->AddGoto(predecessor_block, block);
581     }
582   }
583 
584   void ConnectTailCall(Node* call) {
585     Node* call_control = NodeProperties::GetControlInput(call);
586     BasicBlock* call_block = FindPredecessorBlock(call_control);
587     TraceConnect(call, call_block, nullptr);
588     schedule_->AddTailCall(call_block, call);
589   }
590 
591   void ConnectReturn(Node* ret) {
592     Node* return_control = NodeProperties::GetControlInput(ret);
593     BasicBlock* return_block = FindPredecessorBlock(return_control);
594     TraceConnect(ret, return_block, nullptr);
595     schedule_->AddReturn(return_block, ret);
596   }
597 
598   void ConnectDeoptimize(Node* deopt) {
599     Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
600     BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
601     TraceConnect(deopt, deoptimize_block, nullptr);
602     schedule_->AddDeoptimize(deoptimize_block, deopt);
603   }
604 
605   void ConnectThrow(Node* thr) {
606     Node* throw_control = NodeProperties::GetControlInput(thr);
607     BasicBlock* throw_block = FindPredecessorBlock(throw_control);
608     TraceConnect(thr, throw_block, nullptr);
609     schedule_->AddThrow(throw_block, thr);
610   }
611 
612   void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
613     DCHECK_NOT_NULL(block);
614     if (succ == nullptr) {
615       TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
616             node->op()->mnemonic(), block->id().ToInt());
617     } else {
618       TRACE("Connect #%d:%s, id:%d -> id:%d\n", node->id(),
619             node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt());
620     }
621   }
622 
623   bool IsFinalMerge(Node* node) {
624     return (node->opcode() == IrOpcode::kMerge &&
625             node == scheduler_->graph_->end()->InputAt(0));
626   }
627 
628   bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
629     size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
630     size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
631     return entry != exit && entry_class == exit_class;
632   }
633 
634   void ResetDataStructures() {
635     control_.clear();
636     DCHECK(queue_.empty());
637     DCHECK(control_.empty());
638   }
639 
640   Zone* zone_;
641   Scheduler* scheduler_;
642   Schedule* schedule_;
643   NodeMarker<bool> queued_;      // Mark indicating whether node is queued.
644   ZoneQueue<Node*> queue_;       // Queue used for breadth-first traversal.
645   NodeVector control_;           // List of encountered control nodes.
646   Node* component_entry_;        // Component single-entry node.
647   BasicBlock* component_start_;  // Component single-entry block.
648   BasicBlock* component_end_;    // Component single-exit block.
649 };
650 
651 
652 void Scheduler::BuildCFG() {
653   TRACE("--- CREATING CFG -------------------------------------------\n");
654 
655   // Instantiate a new control equivalence algorithm for the graph.
656   equivalence_ = zone_->New<ControlEquivalence>(zone_, graph_);
657 
658   // Build a control-flow graph for the main control-connected component that
659   // is being spanned by the graph's start and end nodes.
660   control_flow_builder_ = zone_->New<CFGBuilder>(zone_, this);
661   control_flow_builder_->Run();
662 
663   // Initialize per-block data.
664   // Reserve an extra 10% to avoid resizing vector when fusing floating control.
665   scheduled_nodes_.reserve(schedule_->BasicBlockCount() * 1.1);
666   scheduled_nodes_.resize(schedule_->BasicBlockCount());
667 }
668 
669 
670 // -----------------------------------------------------------------------------
671 // Phase 2: Compute special RPO and dominator tree.
672 
673 
674 // Compute the special reverse-post-order block ordering, which is essentially
675 // a RPO of the graph where loop bodies are contiguous. Properties:
676 // 1. If block A is a predecessor of B, then A appears before B in the order,
677 //    unless B is a loop header and A is in the loop headed at B
678 //    (i.e. A -> B is a backedge).
679 // => If block A dominates block B, then A appears before B in the order.
680 // => If block A is a loop header, A appears before all blocks in the loop
681 //    headed at A.
682 // 2. All loops are contiguous in the order (i.e. no intervening blocks that
683 //    do not belong to the loop.)
684 // Note a simple RPO traversal satisfies (1) but not (2).
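// Illustrative ordering (assumed CFG, not from this file): for
//   B0 -> B1, B1 -> B2, B2 -> B1 (backedge), B1 -> B3
// a plain RPO may emit B0 B1 B3 B2, splitting the loop {B1, B2}; the special
// RPO emits B0 B1 B2 B3 so that the loop body remains contiguous.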
685 class SpecialRPONumberer : public ZoneObject {
686  public:
687   SpecialRPONumberer(Zone* zone, Schedule* schedule)
688       : zone_(zone),
689         schedule_(schedule),
690         order_(nullptr),
691         beyond_end_(nullptr),
692         loops_(zone),
693         backedges_(zone),
694         stack_(zone),
695         previous_block_count_(0),
696         empty_(0, zone) {}
697 
698   // Computes the special reverse-post-order for the main control flow graph,
699   // that is for the graph spanned between the schedule's start and end blocks.
700   void ComputeSpecialRPO() {
701     DCHECK_EQ(0, schedule_->end()->SuccessorCount());
702     DCHECK(!order_);  // Main order does not exist yet.
703     ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
704   }
705 
706   // Computes the special reverse-post-order for a partial control flow graph,
707   // that is for the graph spanned between the given {entry} and {end} blocks,
708   // then updates the existing ordering with this new information.
709   void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
710     DCHECK(order_);  // Main order to be updated is present.
711     ComputeAndInsertSpecialRPO(entry, end);
712   }
713 
714   // Serialize the previously computed order as a special reverse-post-order
715   // numbering for basic blocks into the final schedule.
716   void SerializeRPOIntoSchedule() {
717     int32_t number = 0;
718     for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
719       b->set_rpo_number(number++);
720       schedule_->rpo_order()->push_back(b);
721     }
722     BeyondEndSentinel()->set_rpo_number(number);
723   }
724 
725   // Print and verify the special reverse-post-order.
726   void PrintAndVerifySpecialRPO() {
727 #if DEBUG
728     if (FLAG_trace_turbo_scheduler) PrintRPO();
729     VerifySpecialRPO();
730 #endif
731   }
732 
733   const ZoneVector<BasicBlock*>& GetOutgoingBlocks(BasicBlock* block) {
734     if (HasLoopNumber(block)) {
735       LoopInfo const& loop = loops_[GetLoopNumber(block)];
736       if (loop.outgoing) return *loop.outgoing;
737     }
738     return empty_;
739   }
740 
741  private:
742   using Backedge = std::pair<BasicBlock*, size_t>;
743 
744   // Numbering for BasicBlock::rpo_number for this block traversal:
745   static const int kBlockOnStack = -2;
746   static const int kBlockVisited1 = -3;
747   static const int kBlockVisited2 = -4;
748   static const int kBlockUnvisited1 = -1;
749   static const int kBlockUnvisited2 = kBlockVisited1;
750 
751   struct SpecialRPOStackFrame {
752     BasicBlock* block;
753     size_t index;
754   };
755 
756   struct LoopInfo {
757     BasicBlock* header;
758     ZoneVector<BasicBlock*>* outgoing;
759     BitVector* members;
760     LoopInfo* prev;
761     BasicBlock* end;
762     BasicBlock* start;
763 
764     void AddOutgoing(Zone* zone, BasicBlock* block) {
765       if (outgoing == nullptr) {
766         outgoing = zone->New<ZoneVector<BasicBlock*>>(zone);
767       }
768       outgoing->push_back(block);
769     }
770   };
771 
772   int Push(int depth, BasicBlock* child, int unvisited) {
773     if (child->rpo_number() == unvisited) {
774       stack_[depth].block = child;
775       stack_[depth].index = 0;
776       child->set_rpo_number(kBlockOnStack);
777       return depth + 1;
778     }
779     return depth;
780   }
781 
782   BasicBlock* PushFront(BasicBlock* head, BasicBlock* block) {
783     block->set_rpo_next(head);
784     return block;
785   }
786 
787   static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
788   static void SetLoopNumber(BasicBlock* block, int loop_number) {
789     return block->set_loop_number(loop_number);
790   }
791   static bool HasLoopNumber(BasicBlock* block) {
792     return block->loop_number() >= 0;
793   }
794 
795   // We only need this special sentinel because some tests use the schedule's
796   // end block in actual control flow (e.g. with end having successors).
797   BasicBlock* BeyondEndSentinel() {
798     if (beyond_end_ == nullptr) {
799       BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
800       beyond_end_ = schedule_->zone()->New<BasicBlock>(schedule_->zone(), id);
801     }
802     return beyond_end_;
803   }
804 
805   // Compute special RPO for the control flow graph between {entry} and {end},
806   // mutating any existing order so that the result is still valid.
807   void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
808     // RPO should not have been serialized for this schedule yet.
809     CHECK_EQ(kBlockUnvisited1, schedule_->start()->loop_number());
810     CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
811     CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));
812 
813     // Find correct insertion point within existing order.
814     BasicBlock* insertion_point = entry->rpo_next();
815     BasicBlock* order = insertion_point;
816 
817     // Perform an iterative RPO traversal using an explicit stack,
818     // recording backedges that form cycles. O(|B|).
819     DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
820     stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
821     previous_block_count_ = schedule_->BasicBlockCount();
822     int stack_depth = Push(0, entry, kBlockUnvisited1);
823     int num_loops = static_cast<int>(loops_.size());
824 
825     while (stack_depth > 0) {
826       int current = stack_depth - 1;
827       SpecialRPOStackFrame* frame = &stack_[current];
828 
829       if (frame->block != end &&
830           frame->index < frame->block->SuccessorCount()) {
831         // Process the next successor.
832         BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
833         if (succ->rpo_number() == kBlockVisited1) continue;
834         if (succ->rpo_number() == kBlockOnStack) {
835           // The successor is on the stack, so this is a backedge (cycle).
836           backedges_.push_back(Backedge(frame->block, frame->index - 1));
837           if (!HasLoopNumber(succ)) {
838             // Assign a new loop number to the header if it doesn't have one.
839             SetLoopNumber(succ, num_loops++);
840           }
841         } else {
842           // Push the successor onto the stack.
843           DCHECK_EQ(kBlockUnvisited1, succ->rpo_number());
844           stack_depth = Push(stack_depth, succ, kBlockUnvisited1);
845         }
846       } else {
847         // Finished with all successors; pop the stack and add the block.
848         order = PushFront(order, frame->block);
849         frame->block->set_rpo_number(kBlockVisited1);
850         stack_depth--;
851       }
852     }
853 
854     // If no loops were encountered, then the order we computed was correct.
855     if (num_loops > static_cast<int>(loops_.size())) {
856       // Otherwise, compute the loop information from the backedges in order
857       // to perform a traversal that groups loop bodies together.
858       ComputeLoopInfo(&stack_, num_loops, &backedges_);
859 
860       // Initialize the "loop stack". Note the entry could be a loop header.
861       LoopInfo* loop =
862           HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : nullptr;
863       order = insertion_point;
864 
865       // Perform an iterative post-order traversal, visiting loop bodies before
866       // edges that lead out of loops. Visits each block once, but linking loop
867       // sections together is linear in the loop size, so overall is
868       // O(|B| + max(loop_depth) * max(|loop|))
869       stack_depth = Push(0, entry, kBlockUnvisited2);
870       while (stack_depth > 0) {
871         SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
872         BasicBlock* block = frame->block;
873         BasicBlock* succ = nullptr;
874 
875         if (block != end && frame->index < block->SuccessorCount()) {
876           // Process the next normal successor.
877           succ = block->SuccessorAt(frame->index++);
878         } else if (HasLoopNumber(block)) {
879           // Process additional outgoing edges from the loop header.
880           if (block->rpo_number() == kBlockOnStack) {
881             // Finish the loop body the first time the header is left on the
882             // stack.
883             DCHECK(loop != nullptr && loop->header == block);
884             loop->start = PushFront(order, block);
885             order = loop->end;
886             block->set_rpo_number(kBlockVisited2);
887             // Pop the loop stack and continue visiting outgoing edges within
888             // the context of the outer loop, if any.
889             loop = loop->prev;
890             // We leave the loop header on the stack; the rest of this iteration
891             // and later iterations will go through its outgoing edges list.
892           }
893 
894           // Use the next outgoing edge if there are any.
895           size_t outgoing_index = frame->index - block->SuccessorCount();
896           LoopInfo* info = &loops_[GetLoopNumber(block)];
897           DCHECK(loop != info);
898           if (block != entry && info->outgoing != nullptr &&
899               outgoing_index < info->outgoing->size()) {
900             succ = info->outgoing->at(outgoing_index);
901             frame->index++;
902           }
903         }
904 
905         if (succ != nullptr) {
906           // Process the next successor.
907           if (succ->rpo_number() == kBlockOnStack) continue;
908           if (succ->rpo_number() == kBlockVisited2) continue;
909           DCHECK_EQ(kBlockUnvisited2, succ->rpo_number());
910           if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
911             // The successor is not in the current loop or any nested loop.
912             // Add it to the outgoing edges of this loop and visit it later.
913             loop->AddOutgoing(zone_, succ);
914           } else {
915             // Push the successor onto the stack.
916             stack_depth = Push(stack_depth, succ, kBlockUnvisited2);
917             if (HasLoopNumber(succ)) {
918               // Push the inner loop onto the loop stack.
919               DCHECK(GetLoopNumber(succ) < num_loops);
920               LoopInfo* next = &loops_[GetLoopNumber(succ)];
921               next->end = order;
922               next->prev = loop;
923               loop = next;
924             }
925           }
926         } else {
927           // Finished with all successors of the current block.
928           if (HasLoopNumber(block)) {
929             // If we are going to pop a loop header, then add its entire body.
930             LoopInfo* info = &loops_[GetLoopNumber(block)];
931             for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
932               if (b->rpo_next() == info->end) {
933                 b->set_rpo_next(order);
934                 info->end = order;
935                 break;
936               }
937             }
938             order = info->start;
939           } else {
940             // Pop a single node off the stack and add it to the order.
941             order = PushFront(order, block);
942             block->set_rpo_number(kBlockVisited2);
943           }
944           stack_depth--;
945         }
946       }
947     }
948 
949     // Publish new order the first time.
950     if (order_ == nullptr) order_ = order;
951 
952     // Compute the correct loop headers and set the correct loop ends.
953     LoopInfo* current_loop = nullptr;
954     BasicBlock* current_header = entry->loop_header();
955     int32_t loop_depth = entry->loop_depth();
956     if (entry->IsLoopHeader()) --loop_depth;  // Entry might be a loop header.
957     for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
958       BasicBlock* current = b;
959 
960       // Reset BasicBlock::rpo_number again.
961       current->set_rpo_number(kBlockUnvisited1);
962 
963       // Finish the previous loop(s) if we just exited them.
964       while (current_header != nullptr &&
965              current == current_header->loop_end()) {
966         DCHECK(current_header->IsLoopHeader());
967         DCHECK_NOT_NULL(current_loop);
968         current_loop = current_loop->prev;
969         current_header =
970             current_loop == nullptr ? nullptr : current_loop->header;
971         --loop_depth;
972       }
973       current->set_loop_header(current_header);
974 
975       // Push a new loop onto the stack if this loop is a loop header.
976       if (HasLoopNumber(current)) {
977         ++loop_depth;
978         current_loop = &loops_[GetLoopNumber(current)];
979         BasicBlock* end = current_loop->end;
980         current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
981         current_header = current_loop->header;
982         TRACE("id:%d is a loop header, increment loop depth to %d\n",
983               current->id().ToInt(), loop_depth);
984       }
985 
986       current->set_loop_depth(loop_depth);
987 
988       if (current->loop_header() == nullptr) {
989         TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
990               current->loop_depth());
991       } else {
992         TRACE("id:%d has loop header id:%d, (depth == %d)\n",
993               current->id().ToInt(), current->loop_header()->id().ToInt(),
994               current->loop_depth());
995       }
996     }
997   }
998 
999   // Computes loop membership from the backedges of the control flow graph.
1000   void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>* queue,
1001                        size_t num_loops, ZoneVector<Backedge>* backedges) {
1002     // Extend existing loop membership vectors.
1003     for (LoopInfo& loop : loops_) {
1004       loop.members->Resize(static_cast<int>(schedule_->BasicBlockCount()),
1005                            zone_);
1006     }
1007 
1008     // Extend loop information vector.
1009     loops_.resize(num_loops, LoopInfo());
1010 
1011     // Compute loop membership starting from backedges.
1012     // O(max(loop_depth) * max(|loop|))
1013     for (size_t i = 0; i < backedges->size(); i++) {
1014       BasicBlock* member = backedges->at(i).first;
1015       BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
1016       size_t loop_num = GetLoopNumber(header);
1017       if (loops_[loop_num].header == nullptr) {
1018         loops_[loop_num].header = header;
1019         loops_[loop_num].members = zone_->New<BitVector>(
1020             static_cast<int>(schedule_->BasicBlockCount()), zone_);
1021       }
1022 
1023       int queue_length = 0;
1024       if (member != header) {
1025         // As long as the header doesn't have a backedge to itself, push
1026         // the member onto the queue and process its predecessors.
1027         if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
1028           loops_[loop_num].members->Add(member->id().ToInt());
1029         }
1030         (*queue)[queue_length++].block = member;
1031       }
1032 
1033       // Propagate loop membership backwards. All predecessors of M up to the
1034       // loop header H are members of the loop too. O(|blocks between M and H|).
1035       while (queue_length > 0) {
1036         BasicBlock* block = (*queue)[--queue_length].block;
1037         for (size_t i = 0; i < block->PredecessorCount(); i++) {
1038           BasicBlock* pred = block->PredecessorAt(i);
1039           if (pred != header) {
1040             if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
1041               loops_[loop_num].members->Add(pred->id().ToInt());
1042               (*queue)[queue_length++].block = pred;
1043             }
1044           }
1045         }
1046       }
1047     }
1048   }
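  // Illustrative run (assumed blocks): for a backedge B4 -> B1 with header
  // B1, B4 is added to the member bit vector and queued; its predecessors
  // B3 and B2 are then added and queued in turn, and the walk stops at B1,
  // leaving {B2, B3, B4} as members (the header is tracked separately).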
1049 
1050 #if DEBUG
1051   void PrintRPO() {
1052     StdoutStream os;
1053     os << "RPO with " << loops_.size() << " loops";
1054     if (loops_.size() > 0) {
1055       os << " (";
1056       for (size_t i = 0; i < loops_.size(); i++) {
1057         if (i > 0) os << " ";
1058         os << "id:" << loops_[i].header->id();
1059       }
1060       os << ")";
1061     }
1062     os << ":\n";
1063 
1064     for (BasicBlock* block = order_; block != nullptr;
1065          block = block->rpo_next()) {
1066       os << std::setw(5) << "B" << block->rpo_number() << ":";
1067       for (size_t i = 0; i < loops_.size(); i++) {
1068         bool range = loops_[i].header->LoopContains(block);
1069         bool membership = loops_[i].header != block && range;
1070         os << (membership ? " |" : "  ");
1071         os << (range ? "x" : " ");
1072       }
1073       os << "  id:" << block->id() << ": ";
1074       if (block->loop_end() != nullptr) {
1075         os << " range: [B" << block->rpo_number() << ", B"
1076            << block->loop_end()->rpo_number() << ")";
1077       }
1078       if (block->loop_header() != nullptr) {
1079         os << " header: id:" << block->loop_header()->id();
1080       }
1081       if (block->loop_depth() > 0) {
1082         os << " depth: " << block->loop_depth();
1083       }
1084       os << "\n";
1085     }
1086   }
1087 
1088   void VerifySpecialRPO() {
1089     BasicBlockVector* order = schedule_->rpo_order();
1090     DCHECK_LT(0, order->size());
1091     DCHECK_EQ(0, (*order)[0]->id().ToInt());  // entry should be first.
1092 
1093     for (size_t i = 0; i < loops_.size(); i++) {
1094       LoopInfo* loop = &loops_[i];
1095       BasicBlock* header = loop->header;
1096       BasicBlock* end = header->loop_end();
1097 
1098       DCHECK_NOT_NULL(header);
1099       DCHECK_LE(0, header->rpo_number());
1100       DCHECK_LT(header->rpo_number(), order->size());
1101       DCHECK_NOT_NULL(end);
1102       DCHECK_LE(end->rpo_number(), order->size());
1103       DCHECK_GT(end->rpo_number(), header->rpo_number());
1104       DCHECK_NE(header->loop_header(), header);
1105 
1106       // Verify the start ... end list relationship.
1107       int links = 0;
1108       BasicBlock* block = loop->start;
1109       DCHECK_EQ(header, block);
1110       bool end_found;
1111       while (true) {
1112         if (block == nullptr || block == loop->end) {
1113           end_found = (loop->end == block);
1114           break;
1115         }
1116         // The list should be in the same order as the final result.
1117         DCHECK(block->rpo_number() == links + header->rpo_number());
1118         links++;
1119         block = block->rpo_next();
1120         DCHECK_LT(links, static_cast<int>(2 * order->size()));  // cycle?
1121       }
1122       DCHECK_LT(0, links);
1123       DCHECK_EQ(links, end->rpo_number() - header->rpo_number());
1124       DCHECK(end_found);
1125 
1126       // Check loop depth of the header.
1127       int loop_depth = 0;
1128       for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
1129         loop_depth++;
1130       }
1131       DCHECK_EQ(loop_depth, header->loop_depth());
1132 
1133       // Check the contiguousness of loops.
1134       int count = 0;
1135       for (int j = 0; j < static_cast<int>(order->size()); j++) {
1136         BasicBlock* block = order->at(j);
1137         DCHECK_EQ(block->rpo_number(), j);
1138         if (j < header->rpo_number() || j >= end->rpo_number()) {
1139           DCHECK(!header->LoopContains(block));
1140         } else {
1141           DCHECK(header->LoopContains(block));
1142           DCHECK_GE(block->loop_depth(), loop_depth);
1143           count++;
1144         }
1145       }
1146       DCHECK_EQ(links, count);
1147     }
1148   }
1149 #endif  // DEBUG
1150 
1151   Zone* zone_;
1152   Schedule* schedule_;
1153   BasicBlock* order_;
1154   BasicBlock* beyond_end_;
1155   ZoneVector<LoopInfo> loops_;
1156   ZoneVector<Backedge> backedges_;
1157   ZoneVector<SpecialRPOStackFrame> stack_;
1158   size_t previous_block_count_;
1159   ZoneVector<BasicBlock*> const empty_;
1160 };
1161 
1162 
1163 BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
1164   SpecialRPONumberer numberer(zone, schedule);
1165   numberer.ComputeSpecialRPO();
1166   numberer.SerializeRPOIntoSchedule();
1167   numberer.PrintAndVerifySpecialRPO();
1168   return schedule->rpo_order();
1169 }
1170 
1171 
1172 void Scheduler::ComputeSpecialRPONumbering() {
1173   TRACE("--- COMPUTING SPECIAL RPO ----------------------------------\n");
1174 
1175   // Compute the special reverse-post-order for basic blocks.
1176   special_rpo_ = zone_->New<SpecialRPONumberer>(zone_, schedule_);
1177   special_rpo_->ComputeSpecialRPO();
1178 }
1179 
1180 
1181 void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
1182   for (/*nop*/; block != nullptr; block = block->rpo_next()) {
1183     auto pred = block->predecessors().begin();
1184     auto end = block->predecessors().end();
1185     DCHECK(pred != end);  // All blocks except start have predecessors.
1186     BasicBlock* dominator = *pred;
1187     bool deferred = dominator->deferred();
1188     // For multiple predecessors, walk up the dominator tree until a common
1189     // dominator is found. Visitation order guarantees that all predecessors
1190     // except for backwards edges have been visited.
1191     for (++pred; pred != end; ++pred) {
1192       // Don't examine backwards edges.
1193       if ((*pred)->dominator_depth() < 0) continue;
1194       dominator = BasicBlock::GetCommonDominator(dominator, *pred);
1195       deferred = deferred & (*pred)->deferred();
1196     }
1197     block->set_dominator(dominator);
1198     block->set_dominator_depth(dominator->dominator_depth() + 1);
1199     block->set_deferred(deferred | block->deferred());
1200     TRACE("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(),
1201           dominator->id().ToInt(), block->dominator_depth());
1202   }
1203 }
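// Illustrative sketch (an assumption about BasicBlock::GetCommonDominator,
// which is defined elsewhere): the usual walk climbs the deeper block's
// dominator chain until the depths match, then climbs both in lockstep:
//
//   while (b1 != b2) {
//     if (b1->dominator_depth() < b2->dominator_depth()) {
//       b2 = b2->dominator();
//     } else {
//       b1 = b1->dominator();
//     }
//   }
//   return b1;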
1204 
1205 void Scheduler::GenerateDominatorTree(Schedule* schedule) {
1206   // Seed start block to be the first dominator.
1207   schedule->start()->set_dominator_depth(0);
1208 
1209   // Build the block dominator tree resulting from the above seed.
1210   PropagateImmediateDominators(schedule->start()->rpo_next());
1211 }
1212 
1213 void Scheduler::GenerateDominatorTree() {
1214   TRACE("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
1215   GenerateDominatorTree(schedule_);
1216 }
1217 
1218 // -----------------------------------------------------------------------------
1219 // Phase 3: Prepare use counts for nodes.
1220 
1221 
1222 class PrepareUsesVisitor {
1223  public:
1224   explicit PrepareUsesVisitor(Scheduler* scheduler)
1225       : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
1226 
1227   void Pre(Node* node) {
1228     if (scheduler_->InitializePlacement(node) == Scheduler::kFixed) {
1229       // Fixed nodes are always roots for schedule late.
1230       scheduler_->schedule_root_nodes_.push_back(node);
1231       if (!schedule_->IsScheduled(node)) {
1232         // Make sure root nodes are scheduled in their respective blocks.
1233         TRACE("Scheduling fixed position node #%d:%s\n", node->id(),
1234               node->op()->mnemonic());
1235         IrOpcode::Value opcode = node->opcode();
1236         BasicBlock* block =
1237             opcode == IrOpcode::kParameter
1238                 ? schedule_->start()
1239                 : schedule_->block(NodeProperties::GetControlInput(node));
1240         DCHECK_NOT_NULL(block);
1241         schedule_->AddNode(block, node);
1242       }
1243     }
1244   }
1245 
1246   void PostEdge(Node* from, int index, Node* to) {
1247     // If the edge is from an unscheduled node, then tally it in the use count
1248     // for all of its inputs. The same criterion will be used in ScheduleLate
1249     // for decrementing use counts.
1250     if (!schedule_->IsScheduled(from)) {
1251       DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
1252       scheduler_->IncrementUnscheduledUseCount(to, index, from);
1253     }
1254   }
1255 
1256  private:
1257   Scheduler* scheduler_;
1258   Schedule* schedule_;
1259 };
1260 
1261 
1262 void Scheduler::PrepareUses() {
1263   TRACE("--- PREPARE USES -------------------------------------------\n");
1264 
1265   // Count the uses of every node, which is used to ensure that all of a
1266   // node's uses are scheduled before the node itself.
1267   PrepareUsesVisitor prepare_uses(this);
1268 
1269   // TODO(turbofan): simplify the careful pre/post ordering here.
1270   BoolVector visited(graph_->NodeCount(), false, zone_);
1271   ZoneStack<Node::InputEdges::iterator> stack(zone_);
1272   Node* node = graph_->end();
1273   prepare_uses.Pre(node);
1274   visited[node->id()] = true;
1275   stack.push(node->input_edges().begin());
1276   while (!stack.empty()) {
1277     tick_counter_->TickAndMaybeEnterSafepoint();
1278     Edge edge = *stack.top();
1279     Node* node = edge.to();
1280     if (visited[node->id()]) {
1281       prepare_uses.PostEdge(edge.from(), edge.index(), edge.to());
1282       if (++stack.top() == edge.from()->input_edges().end()) stack.pop();
1283     } else {
1284       prepare_uses.Pre(node);
1285       visited[node->id()] = true;
1286       if (node->InputCount() > 0) stack.push(node->input_edges().begin());
1287     }
1288   }
1289 }
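// Traversal note (restating the loop above): Pre() runs the first time a node
// is reached and initializes its placement; PostEdge() runs exactly once per
// input edge, after the edge's target has been visited, and tallies the edge
// in the target's unscheduled-use count whenever the edge's source is not yet
// scheduled. ScheduleLate later decrements by the same criterion.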
1290 
1291 
1292 // -----------------------------------------------------------------------------
1293 // Phase 4: Schedule nodes early.
1294 
1295 
1296 class ScheduleEarlyNodeVisitor {
1297  public:
1298   ScheduleEarlyNodeVisitor(Zone* zone, Scheduler* scheduler)
1299       : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}
1300 
1301   // Run the schedule early algorithm on a set of fixed root nodes.
1302   void Run(NodeVector* roots) {
1303     for (Node* const root : *roots) {
1304       queue_.push(root);
1305       while (!queue_.empty()) {
1306         scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
1307         VisitNode(queue_.front());
1308         queue_.pop();
1309       }
1310     }
1311   }
1312 
1313  private:
1314   // Visits one node from the queue and propagates its current schedule early
1315   // position to all uses. This in turn might push more nodes onto the queue.
1316   void VisitNode(Node* node) {
1317     Scheduler::SchedulerData* data = scheduler_->GetData(node);
1318 
1319     // Fixed nodes already know their schedule early position.
1320     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
1321       data->minimum_block_ = schedule_->block(node);
1322       TRACE("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
1323             node->id(), node->op()->mnemonic(),
1324             data->minimum_block_->id().ToInt(),
1325             data->minimum_block_->dominator_depth());
1326     }
1327 
1328     // No need to propagate unconstrained schedule early positions.
1329     if (data->minimum_block_ == schedule_->start()) return;
1330 
1331     // Propagate schedule early position.
1332     DCHECK_NOT_NULL(data->minimum_block_);
1333     for (auto use : node->uses()) {
1334       if (scheduler_->IsLive(use)) {
1335         PropagateMinimumPositionToNode(data->minimum_block_, use);
1336       }
1337     }
1338   }
1339 
1340   // Propagates {block} as another minimum position into the given {node}. This
1341   // has the net effect of computing the minimum dominator block of {node} that
1342   // still post-dominates all inputs to {node} when the queue is processed.
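  // Illustrative example: if {node} has one input fixed in a block at
  // dominator depth 2 and another at depth 4 on the same dominator chain,
  // repeated propagation leaves {node}'s minimum_block_ at the deeper block.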
1343   void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
1344     Scheduler::SchedulerData* data = scheduler_->GetData(node);
1345 
1346     // No need to propagate to a fixed node; it's guaranteed to be a root.
1347     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;
1348 
1349     // Coupled nodes influence schedule early position of their control.
1350     if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
1351       Node* control = NodeProperties::GetControlInput(node);
1352       PropagateMinimumPositionToNode(block, control);
1353     }
1354 
1355     // Propagate new position if it is deeper down the dominator tree than the
1356     // current. Note that all inputs need to have a minimum block position
1357     // inside the dominator chain of {node}'s minimum block position.
1358     DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
1359     if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
1360       data->minimum_block_ = block;
1361       queue_.push(node);
1362       TRACE("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
1363             node->id(), node->op()->mnemonic(),
1364             data->minimum_block_->id().ToInt(),
1365             data->minimum_block_->dominator_depth());
1366     }
1367   }
1368 
1369 #if DEBUG
1370   bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
1371     BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
1372     return dominator == b1 || dominator == b2;
1373   }
1374 #endif
1375 
1376   Scheduler* scheduler_;
1377   Schedule* schedule_;
1378   ZoneQueue<Node*> queue_;
1379 };
1380 
1381 
1382 void Scheduler::ScheduleEarly() {
1383   TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
1384   if (FLAG_trace_turbo_scheduler) {
1385     TRACE("roots: ");
1386     for (Node* node : schedule_root_nodes_) {
1387       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1388     }
1389     TRACE("\n");
1390   }
1391 
1392   // Compute the minimum block for each node thereby determining the earliest
1393   // position each node could be placed within a valid schedule.
1394   ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
1395   schedule_early_visitor.Run(&schedule_root_nodes_);
1396 }
1397 
1398 
1399 // -----------------------------------------------------------------------------
1400 // Phase 5: Schedule nodes late.
1401 
1402 
1403 class ScheduleLateNodeVisitor {
1404  public:
1405   ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
1406       : zone_(zone),
1407         scheduler_(scheduler),
1408         schedule_(scheduler_->schedule_),
1409         marked_(scheduler->zone_),
1410         marking_queue_(scheduler->zone_) {}
1411 
1412   // Run the schedule late algorithm on a set of fixed root nodes.
1413   void Run(NodeVector* roots) {
1414     for (Node* const root : *roots) {
1415       ProcessQueue(root);
1416     }
1417   }
1418 
1419  private:
1420   void ProcessQueue(Node* root) {
1421     ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
1422     for (Node* node : root->inputs()) {
1423       // Don't schedule coupled nodes on their own.
1424       if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
1425         node = NodeProperties::GetControlInput(node);
1426       }
1427 
1428       // Test schedulability condition by looking at unscheduled use count.
1429       if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
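      // A node becomes schedulable only once all of its uses are scheduled;
      // the counter was set up in PrepareUses and is decremented as uses are
      // placed.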
1430 
1431       queue->push(node);
1432       do {
1433         scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
1434         Node* const node = queue->front();
1435         queue->pop();
1436         VisitNode(node);
1437       } while (!queue->empty());
1438     }
1439   }
1440 
1441   // Visits one node from the queue of schedulable nodes and determines its
1442   // schedule late position. Also hoists nodes out of loops to find a
1443   // better scheduling position.
1444   void VisitNode(Node* node) {
1445     DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
1446 
1447     // Don't schedule nodes that are already scheduled.
1448     if (schedule_->IsScheduled(node)) return;
1449     DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));
1450 
1451     // Determine the dominating block for all of the uses of this node. It is
1452     // the latest block that this node can be scheduled in.
1453     TRACE("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
1454     BasicBlock* block = GetCommonDominatorOfUses(node);
1455     DCHECK_NOT_NULL(block);
1456 
1457     // The schedule early block dominates the schedule late block.
1458     BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
1459     DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
1460     TRACE(
1461         "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
1462         node->id(), node->op()->mnemonic(), block->id().ToInt(),
1463         block->loop_depth(), min_block->id().ToInt());
1464 
1465     // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
1466     // into enclosing loop pre-headers until they would precede their schedule
1467     // early position.
1468     BasicBlock* hoist_block = GetHoistBlock(block);
1469     if (hoist_block &&
1470         hoist_block->dominator_depth() >= min_block->dominator_depth()) {
1471       do {
1472         TRACE("  hoisting #%d:%s to block id:%d\n", node->id(),
1473               node->op()->mnemonic(), hoist_block->id().ToInt());
1474         DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
1475         block = hoist_block;
1476         hoist_block = GetHoistBlock(hoist_block);
1477       } while (hoist_block &&
1478                hoist_block->dominator_depth() >= min_block->dominator_depth());
1479     } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
1480       // Split the {node} if beneficial and return the new {block} for it.
1481       block = SplitNode(block, node);
1482     }
1483 
1484     // Schedule the node or a floating control structure.
1485     if (IrOpcode::IsMergeOpcode(node->opcode())) {
1486       ScheduleFloatingControl(block, node);
1487     } else if (node->opcode() == IrOpcode::kFinishRegion) {
1488       ScheduleRegion(block, node);
1489     } else {
1490       ScheduleNode(block, node);
1491     }
1492   }
1493 
1494   bool IsMarked(BasicBlock* block) const {
1495     DCHECK_LT(block->id().ToSize(), marked_.size());
1496     return marked_[block->id().ToSize()];
1497   }
1498 
1499   void Mark(BasicBlock* block) { marked_[block->id().ToSize()] = true; }
1500 
1501   // Mark {block} and push its non-marked predecessors onto the marking queue.
1502   void MarkBlock(BasicBlock* block) {
1503     DCHECK_LT(block->id().ToSize(), marked_.size());
1504     Mark(block);
1505     for (BasicBlock* pred_block : block->predecessors()) {
1506       if (IsMarked(pred_block)) continue;
1507       marking_queue_.push_back(pred_block);
1508     }
1509   }
1510 
1511   BasicBlock* SplitNode(BasicBlock* block, Node* node) {
1512     // For now, we limit splitting to pure nodes.
1513     if (!node->op()->HasProperty(Operator::kPure)) return block;
1514     // TODO(titzer): fix the special case of splitting of projections.
1515     if (node->opcode() == IrOpcode::kProjection) return block;
1516 
1517     // The {block} is the common dominator of all uses of {node}, so we
1518     // cannot split anything unless the {block} has at least two successors.
1519     DCHECK_EQ(block, GetCommonDominatorOfUses(node));
1520     if (block->SuccessorCount() < 2) return block;
1521 
1522     // Clear marking bits.
1523     DCHECK(marking_queue_.empty());
1524     std::fill(marked_.begin(), marked_.end(), false);
1525     marked_.resize(schedule_->BasicBlockCount() + 1, false);
1526 
1527     // Check if the {node} has uses in {block}.
1528     for (Edge edge : node->use_edges()) {
1529       if (!scheduler_->IsLive(edge.from())) continue;
1530       BasicBlock* use_block = GetBlockForUse(edge);
1531       if (use_block == nullptr || IsMarked(use_block)) continue;
1532       if (use_block == block) {
1533         TRACE("  not splitting #%d:%s, it is used in id:%d\n", node->id(),
1534               node->op()->mnemonic(), block->id().ToInt());
1535         marking_queue_.clear();
1536         return block;
1537       }
1538       MarkBlock(use_block);
1539     }
1540 
1541     // Compute transitive marking closure; a block is marked if all its
1542     // successors are marked.
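    // Intuitively, a block ends up marked when every path from it to the end
    // of the graph contains at least one use of {node}.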
1543     do {
1544       BasicBlock* top_block = marking_queue_.front();
1545       marking_queue_.pop_front();
1546       if (IsMarked(top_block)) continue;
1547       bool marked = true;
1548       for (BasicBlock* successor : top_block->successors()) {
1549         if (!IsMarked(successor)) {
1550           marked = false;
1551           break;
1552         }
1553       }
1554       if (marked) MarkBlock(top_block);
1555     } while (!marking_queue_.empty());
1556 
1557     // If the (common dominator) {block} is marked, we know that all paths from
1558     // {block} to the end contain at least one use of {node}, and hence there's
1559     // no point in splitting the {node} in this case.
1560     if (IsMarked(block)) {
1561       TRACE("  not splitting #%d:%s, its common dominator id:%d is perfect\n",
1562             node->id(), node->op()->mnemonic(), block->id().ToInt());
1563       return block;
1564     }
1565 
1566     // Split {node} for uses according to the previously computed marking
1567     // closure. Every marking partition has a unique dominator, which gets a
1568     // copy of the {node}, with the exception of the first partition, which
1569     // gets the {node} itself.
1570     ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
1571     for (Edge edge : node->use_edges()) {
1572       if (!scheduler_->IsLive(edge.from())) continue;
1573       BasicBlock* use_block = GetBlockForUse(edge);
1574       if (use_block == nullptr) continue;
1575       while (IsMarked(use_block->dominator())) {
1576         use_block = use_block->dominator();
1577       }
1578       auto& use_node = dominators[use_block];
1579       if (use_node == nullptr) {
1580         if (dominators.size() == 1u) {
1581           // Place the {node} at {use_block}.
1582           block = use_block;
1583           use_node = node;
1584           TRACE("  pushing #%d:%s down to id:%d\n", node->id(),
1585                 node->op()->mnemonic(), block->id().ToInt());
1586         } else {
1587           // Place a copy of {node} at {use_block}.
1588           use_node = CloneNode(node);
1589           TRACE("  cloning #%d:%s for id:%d\n", use_node->id(),
1590                 use_node->op()->mnemonic(), use_block->id().ToInt());
1591           scheduler_->schedule_queue_.push(use_node);
1592         }
1593       }
1594       edge.UpdateTo(use_node);
1595     }
1596     return block;
1597   }
1598 
1599   BasicBlock* GetHoistBlock(BasicBlock* block) {
1600     if (block->IsLoopHeader()) return block->dominator();
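    // (A loop header's immediate dominator is outside the loop, so this moves
    //  the node out of the loop entirely.)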
1601     // We have to check to make sure that the {block} dominates all
1602     // of the outgoing blocks.  If it doesn't, then there is a path
1603     // out of the loop which does not execute this {block}, so we
1604     // can't hoist operations from this {block} out of the loop, as
1605     // that would introduce additional computations.
1606     if (BasicBlock* header_block = block->loop_header()) {
1607       for (BasicBlock* outgoing_block :
1608            scheduler_->special_rpo_->GetOutgoingBlocks(header_block)) {
1609         if (BasicBlock::GetCommonDominator(block, outgoing_block) != block) {
1610           return nullptr;
1611         }
1612       }
1613       return header_block->dominator();
1614     }
1615     return nullptr;
1616   }
1617 
1618   BasicBlock* GetCommonDominatorOfUses(Node* node) {
1619     BasicBlock* block = nullptr;
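    // Fold over all live uses: a use that has no block (yet) is ignored;
    // otherwise {block} is narrowed to the common dominator of the use blocks
    // seen so far.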
1620     for (Edge edge : node->use_edges()) {
1621       if (!scheduler_->IsLive(edge.from())) continue;
1622       BasicBlock* use_block = GetBlockForUse(edge);
1623       block = block == nullptr
1624                   ? use_block
1625                   : use_block == nullptr
1626                         ? block
1627                         : BasicBlock::GetCommonDominator(block, use_block);
1628     }
1629     return block;
1630   }
1631 
1632   BasicBlock* FindPredecessorBlock(Node* node) {
1633     return scheduler_->control_flow_builder_->FindPredecessorBlock(node);
1634   }
1635 
1636   BasicBlock* GetBlockForUse(Edge edge) {
1637     // TODO(titzer): ignore uses from dead nodes (not visited in PrepareUses()).
1638     // Dead uses only occur if the graph is not trimmed before scheduling.
1639     Node* use = edge.from();
1640     if (IrOpcode::IsPhiOpcode(use->opcode())) {
1641       // If the use is from a coupled (i.e. floating) phi, compute the common
1642       // dominator of its uses. This will not recurse more than one level.
1643       if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
1644         TRACE("  inspecting uses of coupled #%d:%s\n", use->id(),
1645               use->op()->mnemonic());
1646         // TODO(titzer): reenable once above TODO is addressed.
1647         //        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
1648         return GetCommonDominatorOfUses(use);
1649       }
1650       // If the use is from a fixed (i.e. non-floating) phi, we use the
1651       // predecessor block of the corresponding control input to the merge.
1652       if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
1653         TRACE("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
1654               use->op()->mnemonic());
1655         Node* merge = NodeProperties::GetControlInput(use, 0);
1656         DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
1657         Node* input = NodeProperties::GetControlInput(merge, edge.index());
1658         return FindPredecessorBlock(input);
1659       }
1660     } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
1661       // If the use is from a fixed (i.e. non-floating) merge, we use the
1662       // predecessor block of the current input to the merge.
1663       if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
1664         TRACE("  input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
1665               use->op()->mnemonic());
1666         return FindPredecessorBlock(edge.to());
1667       }
1668     }
1669     BasicBlock* result = schedule_->block(use);
1670     if (result == nullptr) return nullptr;
1671     TRACE("  must dominate use #%d:%s in id:%d\n", use->id(),
1672           use->op()->mnemonic(), result->id().ToInt());
1673     return result;
1674   }
1675 
1676   void ScheduleFloatingControl(BasicBlock* block, Node* node) {
1677     scheduler_->FuseFloatingControl(block, node);
1678   }
1679 
1680   void ScheduleRegion(BasicBlock* block, Node* region_end) {
1681     // We only allow regions of instructions connected into a linear
1682     // effect chain. Any value produced by a node in the chain must be the
1683     // value consumed by the FinishRegion node.
1684 
1685     // We schedule back to front; we first schedule FinishRegion.
1686     CHECK_EQ(IrOpcode::kFinishRegion, region_end->opcode());
1687     ScheduleNode(block, region_end);
1688 
1689     // Schedule the chain.
1690     Node* node = NodeProperties::GetEffectInput(region_end);
1691     while (node->opcode() != IrOpcode::kBeginRegion) {
1692       DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
1693       DCHECK_EQ(1, node->op()->EffectInputCount());
1694       DCHECK_EQ(1, node->op()->EffectOutputCount());
1695       DCHECK_EQ(0, node->op()->ControlOutputCount());
1696       // The value output (if there is any) must be consumed
1697       // by the FinishRegion node.
1698       DCHECK(node->op()->ValueOutputCount() == 0 ||
1699              node == region_end->InputAt(0));
1700       ScheduleNode(block, node);
1701       node = NodeProperties::GetEffectInput(node);
1702     }
1703     // Schedule the BeginRegion node.
1704     DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
1705     ScheduleNode(block, node);
1706   }
1707 
1708   void ScheduleNode(BasicBlock* block, Node* node) {
1709     schedule_->PlanNode(block, node);
1710     size_t block_id = block->id().ToSize();
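    // Per-block node lists are allocated lazily the first time a node is
    // planned into the block.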
1711     if (!scheduler_->scheduled_nodes_[block_id]) {
1712       scheduler_->scheduled_nodes_[block_id] = zone_->New<NodeVector>(zone_);
1713     }
1714     scheduler_->scheduled_nodes_[block_id]->push_back(node);
1715     scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
1716   }
1717 
1718   Node* CloneNode(Node* node) {
1719     int const input_count = node->InputCount();
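    // The copy will reference the same inputs as {node}, so each input gains
    // one more unscheduled use that ScheduleLate has to account for.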
1720     for (int index = 0; index < input_count; ++index) {
1721       Node* const input = node->InputAt(index);
1722       scheduler_->IncrementUnscheduledUseCount(input, index, node);
1723     }
1724     Node* const copy = scheduler_->graph_->CloneNode(node);
1725     TRACE(("clone #%d:%s -> #%d\n"), node->id(), node->op()->mnemonic(),
1726           copy->id());
1727     scheduler_->node_data_.resize(copy->id() + 1,
1728                                   scheduler_->DefaultSchedulerData());
1729     scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
1730     return copy;
1731   }
1732 
1733   Zone* zone_;
1734   Scheduler* scheduler_;
1735   Schedule* schedule_;
1736   BoolVector marked_;
1737   ZoneDeque<BasicBlock*> marking_queue_;
1738 };
1739 
1740 
1741 void Scheduler::ScheduleLate() {
1742   TRACE("--- SCHEDULE LATE ------------------------------------------\n");
1743   if (FLAG_trace_turbo_scheduler) {
1744     TRACE("roots: ");
1745     for (Node* node : schedule_root_nodes_) {
1746       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1747     }
1748     TRACE("\n");
1749   }
1750 
1751   // Schedule: Places each node in the common dominator block of all its uses.
1752   ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
1753   schedule_late_visitor.Run(&schedule_root_nodes_);
1754 }
1755 
1756 
1757 // -----------------------------------------------------------------------------
1758 // Phase 6: Seal the final schedule.
1759 
1760 
1761 void Scheduler::SealFinalSchedule() {
1762   TRACE("--- SEAL FINAL SCHEDULE ------------------------------------\n");
1763 
1764   // Serialize the assembly order and reverse-post-order numbering.
1765   special_rpo_->SerializeRPOIntoSchedule();
1766   special_rpo_->PrintAndVerifySpecialRPO();
1767 
1768   // Add collected nodes for basic blocks to their blocks in the right order.
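  // ScheduleLate planned nodes use-before-definition, so each per-block list
  // is reversed here to obtain definition-before-use order within the block.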
1769   int block_num = 0;
1770   for (NodeVector* nodes : scheduled_nodes_) {
1771     BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
1772     BasicBlock* block = schedule_->GetBlockById(id);
1773     if (nodes) {
1774       for (Node* node : base::Reversed(*nodes)) {
1775         schedule_->AddNode(block, node);
1776       }
1777     }
1778   }
1779 }
1780 
1781 
1782 // -----------------------------------------------------------------------------
1783 
1784 
1785 void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
1786   TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
1787   if (FLAG_trace_turbo_scheduler) {
1788     StdoutStream{} << "Schedule before control flow fusion:\n" << *schedule_;
1789   }
1790 
1791   // Iterate on phase 1: Build control-flow graph.
1792   control_flow_builder_->Run(block, node);
1793 
1794   // Iterate on phase 2: Compute special RPO and dominator tree.
1795   special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
1796   // TODO(turbofan): Currently "iterate on" means "re-run". Fix that.
1797   for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
1798     b->set_dominator_depth(-1);
1799     b->set_dominator(nullptr);
1800   }
1801   PropagateImmediateDominators(block->rpo_next());
1802 
1803   // Iterate on phase 4: Schedule nodes early.
1804   // TODO(turbofan): The following loop gathering the propagation roots is a
1805   // temporary solution and should be merged into the rest of the scheduler as
1806   // soon as the approach has settled for all floating loops.
1807   NodeVector propagation_roots(control_flow_builder_->control_);
1808   for (Node* node : control_flow_builder_->control_) {
1809     for (Node* use : node->uses()) {
1810       if (NodeProperties::IsPhi(use) && IsLive(use)) {
1811         propagation_roots.push_back(use);
1812       }
1813     }
1814   }
1815   if (FLAG_trace_turbo_scheduler) {
1816     TRACE("propagation roots: ");
1817     for (Node* node : propagation_roots) {
1818       TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1819     }
1820     TRACE("\n");
1821   }
1822   ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
1823   schedule_early_visitor.Run(&propagation_roots);
1824 
1825   // Move previously planned nodes.
1826   // TODO(turbofan): Improve that by supporting bulk moves.
1827   scheduled_nodes_.resize(schedule_->BasicBlockCount());
1828   MovePlannedNodes(block, schedule_->block(node));
1829 
1830   if (FLAG_trace_turbo_scheduler) {
1831     StdoutStream{} << "Schedule after control flow fusion:\n" << *schedule_;
1832   }
1833 }
1834 
1835 
1836 void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
1837   TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
1838         to->id().ToInt());
1839   NodeVector* from_nodes = scheduled_nodes_[from->id().ToSize()];
1840   NodeVector* to_nodes = scheduled_nodes_[to->id().ToSize()];
1841   if (!from_nodes) return;
1842 
1843   for (Node* const node : *from_nodes) {
1844     schedule_->SetBlockForNode(to, node);
1845   }
1846   if (to_nodes) {
1847     to_nodes->insert(to_nodes->end(), from_nodes->begin(), from_nodes->end());
1848     from_nodes->clear();
1849   } else {
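    // {to} has no node list yet; simply adopt the list from {from}.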
1850     std::swap(scheduled_nodes_[from->id().ToSize()],
1851               scheduled_nodes_[to->id().ToSize()]);
1852   }
1853 }
1854 
1855 #undef TRACE
1856 
1857 }  // namespace compiler
1858 }  // namespace internal
1859 }  // namespace v8
1860