/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_SCHEDULER_H_
#define ART_COMPILER_OPTIMIZING_SCHEDULER_H_

#include <fstream>

#include "base/scoped_arena_allocator.h"
#include "base/scoped_arena_containers.h"
#include "base/stl_util.h"
#include "base/time_utils.h"
#include "code_generator.h"
#include "load_store_analysis.h"
#include "nodes.h"
#include "optimization.h"

namespace art {

// General description of instruction scheduling.
//
// This pass tries to improve the quality of the generated code by reordering
// instructions in the graph to avoid execution delays caused by dependencies
// between instructions.
// Currently, scheduling is performed at the block level, so no `HInstruction`
// ever leaves its block in this pass.
//
// The scheduling process iterates through blocks in the graph. For blocks that
// we can and want to schedule:
// 1) Build a dependency graph for instructions.
//    It includes data dependencies (inputs/uses), but also environment
//    dependencies and side-effect dependencies.
// 2) Schedule the dependency graph.
//    This is a topological sort of the dependency graph, using heuristics to
//    decide what node to schedule first when there are multiple candidates.
//
// A few factors impacting the quality of the scheduling are:
// - The heuristics used to decide what node to schedule in the topological sort
//   when there are multiple valid candidates. There is a wide range of
//   complexity possible here, going from a simple model only considering
//   latencies, to a very detailed CPU pipeline model.
// - Fewer dependencies in the dependency graph give more freedom to the
//   scheduling heuristics. For example, de-aliasing memory accesses can open up
//   possibilities for reordering them.
// - The level of abstraction of the IR. It is easier to evaluate scheduling for
//   IRs that translate to a single assembly instruction than for IRs
//   that generate multiple assembly instructions or generate different code
//   depending on properties of the IR.
// - Scheduling is performed before register allocation, so it is not aware of
//   the impact of moving instructions on register allocation.
//
//
// The scheduling code uses the terms predecessors, successors, and dependencies.
// This can be confusing at times, so here are clarifications.
// These terms are used from the point of view of the program dependency graph. So
// the inputs of an instruction are part of its dependencies, and hence part of its
// predecessors. Likewise, the uses of an instruction are (part of) its successors.
// (Side-effect dependencies can yield predecessors or successors that are not
// inputs or uses.)
//
// Here is a trivial example. For the Java code:
//
//    int a = 1 + 2;
//
// we would have the instructions
//
//    i1 HIntConstant 1
//    i2 HIntConstant 2
//    i3 HAdd [i1,i2]
//
// `i1` and `i2` are predecessors of `i3`.
// `i3` is a successor of `i1` and a successor of `i2`.
// In a scheduling graph for this code we would have three nodes `n1`, `n2`,
// and `n3` (respectively for instructions `i1`, `i2`, and `i3`).
// Conceptually the program dependency graph for this would contain two edges
//
//    n1 -> n3
//    n2 -> n3
//
// Since we schedule backwards (starting from the last instruction in each basic
// block), the implementation of nodes keeps a list of pointers to their
// predecessors. So `n3` would keep pointers to its predecessors `n1` and `n2`.
//
// Node dependencies are also referred to from the program dependency graph
// point of view: we say that node `B` immediately depends on `A` if there is an
// edge from `A` to `B` in the program dependency graph. `A` is a predecessor of
// `B`, `B` is a successor of `A`. In the example above `n3` depends on `n1` and
// `n2`.
// Since nodes in the scheduling graph keep a list of their predecessors, node
// `B` will have a pointer to its predecessor `A`.
// As we schedule backwards, `B` will be selected for scheduling before `A` is.
//
// So the scheduling for the example above could happen as follows
//
//    |---------------------------+------------------------|
//    | candidates for scheduling | instructions scheduled |
//    |---------------------------+------------------------|
//
// The only node without successors is `n3`, so it is the only initial
// candidate.
//
//    | n3                        | (none)                 |
//
// We schedule `n3` as the last (and only) instruction. All its predecessors
// that do not have any unscheduled successors become candidates. That is, `n1`
// and `n2` become candidates.
//
//    | n1, n2                    | n3                     |
//
// One of the candidates is selected. In practice this is where scheduling
// heuristics kick in, to decide which of the candidates should be selected.
// In this example, let it be `n1`. It is scheduled before previously scheduled
// nodes (in program order). There are no other nodes to add to the list of
// candidates.
//
//    | n2                        | n1                     |
//    |                           | n3                     |
//
// The only candidate available for scheduling is `n2`. Schedule it before
// (in program order) the previously scheduled nodes.
//
//    | (none)                    | n2                     |
//    |                           | n1                     |
//    |                           | n3                     |
//    |---------------------------+------------------------|
//
// So finally the instructions will be executed in the order `i2`, `i1`, and `i3`.
// In this trivial example, it does not matter which of `i1` and `i2` is
// scheduled first since they are constants. However, the same process would
// apply if `i1` and `i2` were actual operations (for example `HMul` and `HDiv`).
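//
// In code, the backward list scheduling loop sketched above looks roughly as
// follows (illustrative only; the real loop lives in
// `HScheduler::Schedule(HBasicBlock*, const HeapLocationCollector*)`, and
// `allocator` and `scheduling_graph` are assumed to be in scope):
//
//    // Initial candidates are the nodes without unscheduled successors.
//    ScopedArenaVector<SchedulingNode*> candidates(
//        allocator.Adapter(kArenaAllocScheduler));
//    // ... populate `candidates` from the scheduling graph ...
//    while (!candidates.empty()) {
//      // The heuristics pick the next node among the valid candidates.
//      SchedulingNode* node =
//          selector_->PopHighestPriorityNode(&candidates, scheduling_graph);
//      // Place it (in program order) before the previously scheduled nodes,
//      // and promote any of its predecessors that now have no unscheduled
//      // successors into `candidates`.
//      Schedule(node, &candidates);
//    }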

// Set to true to have instruction scheduling dump scheduling graphs to the file
// `scheduling_graphs.dot`. See `SchedulingGraph::DumpAsDotGraph()`.
static constexpr bool kDumpDotSchedulingGraphs = false;

// Typically used as a default instruction latency.
static constexpr uint32_t kGenericInstructionLatency = 1;

class HScheduler;

/**
 * A node representing an `HInstruction` in the `SchedulingGraph`.
 */
class SchedulingNode : public DeletableArenaObject<kArenaAllocScheduler> {
 public:
  SchedulingNode(HInstruction* instr, ScopedArenaAllocator* allocator, bool is_scheduling_barrier)
      : latency_(0),
        internal_latency_(0),
        critical_path_(0),
        instruction_(instr),
        is_scheduling_barrier_(is_scheduling_barrier),
        data_predecessors_(allocator->Adapter(kArenaAllocScheduler)),
        other_predecessors_(allocator->Adapter(kArenaAllocScheduler)),
        num_unscheduled_successors_(0) {
    data_predecessors_.reserve(kPreallocatedPredecessors);
  }

  void AddDataPredecessor(SchedulingNode* predecessor) {
    // Check whether the predecessor has been added earlier.
    if (HasDataDependency(predecessor)) {
      return;
    }
    data_predecessors_.push_back(predecessor);
    predecessor->num_unscheduled_successors_++;
  }

  const ScopedArenaVector<SchedulingNode*>& GetDataPredecessors() const {
    return data_predecessors_;
  }

  void AddOtherPredecessor(SchedulingNode* predecessor) {
    // Check whether the predecessor has been added earlier.
    // As an optimization of the scheduling graph, we don't need to create another dependency if
    // there is a data dependency between scheduling nodes.
    if (HasOtherDependency(predecessor) || HasDataDependency(predecessor)) {
      return;
    }
    other_predecessors_.push_back(predecessor);
    predecessor->num_unscheduled_successors_++;
  }

  const ScopedArenaVector<SchedulingNode*>& GetOtherPredecessors() const {
    return other_predecessors_;
  }

  void DecrementNumberOfUnscheduledSuccessors() {
    num_unscheduled_successors_--;
  }

  void MaybeUpdateCriticalPath(uint32_t other_critical_path) {
    critical_path_ = std::max(critical_path_, other_critical_path);
  }

  bool HasUnscheduledSuccessors() const {
    return num_unscheduled_successors_ != 0;
  }

  HInstruction* GetInstruction() const { return instruction_; }
  uint32_t GetLatency() const { return latency_; }
  void SetLatency(uint32_t latency) { latency_ = latency; }
  uint32_t GetInternalLatency() const { return internal_latency_; }
  void SetInternalLatency(uint32_t internal_latency) { internal_latency_ = internal_latency; }
  uint32_t GetCriticalPath() const { return critical_path_; }
  bool IsSchedulingBarrier() const { return is_scheduling_barrier_; }

  bool HasDataDependency(const SchedulingNode* node) const {
    return ContainsElement(data_predecessors_, node);
  }

  bool HasOtherDependency(const SchedulingNode* node) const {
    return ContainsElement(other_predecessors_, node);
  }

 private:
  // The latency of this node. It represents the latency from the moment the
  // last instruction for this node has executed to the moment the result
  // produced by this node is available to users.
  uint32_t latency_;
  // This represents the time spent *within* the generated code for this node.
  // It should be zero for nodes that only generate a single instruction.
  uint32_t internal_latency_;

  // The critical path from this instruction to the end of scheduling. It is
  // used by the scheduling heuristics to measure the priority of this instruction.
  // It is defined as
  //     critical_path_ = latency_ + max((use.internal_latency_ + use.critical_path_) for all uses)
  // (Note that here 'uses' is equivalent to 'data successors'. Also see comments in
  // `HScheduler::Schedule(SchedulingNode* scheduling_node)`).
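  // For example (illustrative numbers only), if this node has `latency_ == 2`
  // and two uses whose `internal_latency_ + critical_path_` sums are 3 and 5,
  // then
  //     critical_path_ = 2 + max(3, 5) = 7.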
  uint32_t critical_path_;

  // The instruction that this node represents.
  HInstruction* const instruction_;

  // If a node is a scheduling barrier, other nodes cannot be scheduled before it.
  const bool is_scheduling_barrier_;

  // The lists of predecessors. They cannot be scheduled before this node. Once
  // this node is scheduled, we check whether any of its predecessors has become a
  // valid candidate for scheduling.
  // Predecessors in `data_predecessors_` are data dependencies. Those in
  // `other_predecessors_` contain side-effect dependencies, environment
  // dependencies, and scheduling barrier dependencies.
  ScopedArenaVector<SchedulingNode*> data_predecessors_;
  ScopedArenaVector<SchedulingNode*> other_predecessors_;

  // The number of unscheduled successors for this node. This number is
  // decremented as successors are scheduled. When it reaches zero this node
  // becomes a valid candidate to schedule.
  uint32_t num_unscheduled_successors_;

  static constexpr size_t kPreallocatedPredecessors = 4;
};

/*
 * Provide analysis of instruction dependencies (side effects) which are not in
 * the form of explicit def-use data dependencies.
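 *
 * For example (illustrative), an ArrayGet cannot in general be reordered with
 * a preceding ArraySet even though no def-use edge connects them:
 *
 *    ArraySet [array, i, value]   // may write array[i]
 *    ArrayGet [array, j]          // may read the same location when i == j
 *
 * This analysis reports such pairs so that the scheduling graph can add an
 * "other" dependency between them.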
270  */
271 class SideEffectDependencyAnalysis {
272  public:
SideEffectDependencyAnalysis(const HeapLocationCollector * heap_location_collector)273   explicit SideEffectDependencyAnalysis(const HeapLocationCollector* heap_location_collector)
274       : memory_dependency_analysis_(heap_location_collector) {}
275 
HasSideEffectDependency(HInstruction * instr1,HInstruction * instr2)276   bool HasSideEffectDependency(HInstruction* instr1, HInstruction* instr2) const {
277     if (memory_dependency_analysis_.HasMemoryDependency(instr1, instr2)) {
278       return true;
279     }
280 
281     // Even if above memory dependency check has passed, it is still necessary to
282     // check dependencies between instructions that can throw and instructions
283     // that write to memory.
284     if (HasExceptionDependency(instr1, instr2)) {
285       return true;
286     }
287 
288     return false;
289   }
290 
291  private:
292   static bool HasExceptionDependency(const HInstruction* instr1, const HInstruction* instr2);
293   static bool HasReorderingDependency(const HInstruction* instr1, const HInstruction* instr2);
294 
295   /*
296    * Memory dependency analysis of instructions based on their memory side effects
297    * and heap location information from the LCA pass if it is provided.
298    */
299   class MemoryDependencyAnalysis {
300    public:
MemoryDependencyAnalysis(const HeapLocationCollector * heap_location_collector)301     explicit MemoryDependencyAnalysis(const HeapLocationCollector* heap_location_collector)
302         : heap_location_collector_(heap_location_collector) {}
303 
304     bool HasMemoryDependency(HInstruction* instr1, HInstruction* instr2) const;
305 
306    private:
307     bool ArrayAccessMayAlias(HInstruction* instr1, HInstruction* instr2) const;
308     bool FieldAccessMayAlias(const HInstruction* instr1, const HInstruction* instr2) const;
309     size_t ArrayAccessHeapLocation(HInstruction* instruction) const;
310     size_t FieldAccessHeapLocation(const HInstruction* instruction) const;
311 
312     const HeapLocationCollector* const heap_location_collector_;
313   };
314 
315   MemoryDependencyAnalysis memory_dependency_analysis_;
316 };
317 
318 /*
319  * Directed acyclic graph for scheduling.
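 *
 * A minimal usage sketch (illustrative only; the driving code lives in
 * `HScheduler::Schedule()`, and `allocator`, `heap_location_collector`, and
 * `block` are assumed to be in scope):
 *
 *    SchedulingGraph scheduling_graph(&allocator, heap_location_collector);
 *    // Walk the block backwards so that, when an instruction is added, its
 *    // uses are already in the graph and dependency edges can be created.
 *    for (HBackwardInstructionIterator it(block->GetInstructions());
 *         !it.Done();
 *         it.Advance()) {
 *      scheduling_graph.AddNode(it.Current());
 *    }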
 */
class SchedulingGraph : public ValueObject {
 public:
  SchedulingGraph(ScopedArenaAllocator* allocator,
                  const HeapLocationCollector* heap_location_collector)
      : allocator_(allocator),
        contains_scheduling_barrier_(false),
        nodes_map_(allocator_->Adapter(kArenaAllocScheduler)),
        side_effect_dependency_analysis_(heap_location_collector) {}

  SchedulingNode* AddNode(HInstruction* instr, bool is_scheduling_barrier = false) {
    std::unique_ptr<SchedulingNode> node(
        new (allocator_) SchedulingNode(instr, allocator_, is_scheduling_barrier));
    SchedulingNode* result = node.get();
    nodes_map_.insert(std::make_pair(instr, std::move(node)));
    contains_scheduling_barrier_ |= is_scheduling_barrier;
    AddDependencies(result, is_scheduling_barrier);
    return result;
  }

  SchedulingNode* GetNode(const HInstruction* instr) const {
    auto it = nodes_map_.find(instr);
    if (it == nodes_map_.end()) {
      return nullptr;
    } else {
      return it->second.get();
    }
  }

  size_t Size() const {
    return nodes_map_.size();
  }

  // Dump the scheduling graph, in dot file format, appending it to the file
  // `scheduling_graphs.dot`.
  void DumpAsDotGraph(const std::string& description,
                      const ScopedArenaVector<SchedulingNode*>& initial_candidates);

 protected:
  void AddDependency(SchedulingNode* node, SchedulingNode* dependency, bool is_data_dependency);
  void AddDataDependency(SchedulingNode* node, SchedulingNode* dependency) {
    AddDependency(node, dependency, /*is_data_dependency*/true);
  }
  void AddOtherDependency(SchedulingNode* node, SchedulingNode* dependency) {
    AddDependency(node, dependency, /*is_data_dependency*/false);
  }

  // Analyze whether the scheduling node has cross-iteration dependencies, which
  // means it uses values defined on the previous iteration.
  //
  // Supported cases:
  //
  //   L:
  //     v2 = loop_head_phi(v1)
  //     instr1(v2)
  //     v1 = instr2
  //     goto L
  //
  // In such cases moving instr2 before instr1 creates intersecting live ranges
  // of v1 and v2. As a result a separate register is needed to keep the value
  // defined by instr2, which is only used on the next iteration.
  // If instr2 is not moved, no additional register is needed. The register
  // used by instr1 is reused.
  // To prevent such a situation, an "other" dependency between instr1 and instr2 must be set.
  void AddCrossIterationDependencies(SchedulingNode* node);

  // Add dependency edges for the given `SchedulingNode`: inputs, environment, and side effects.
  void AddDependencies(SchedulingNode* node, bool is_scheduling_barrier = false);

  ScopedArenaAllocator* const allocator_;
  bool contains_scheduling_barrier_;
  ScopedArenaHashMap<const HInstruction*, std::unique_ptr<SchedulingNode>> nodes_map_;
  SideEffectDependencyAnalysis side_effect_dependency_analysis_;
};

/*
 * The visitors derived from this base class are used by schedulers to evaluate
 * the latencies of `HInstruction`s.
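 *
 * A hypothetical subclass (sketch only; the real examples are the
 * per-architecture visitors, e.g. in scheduler_arm64.h) reports latencies by
 * setting the `last_visited_*` fields in its `Visit*` overrides:
 *
 *    class SchedulingLatencyVisitorFoo : public SchedulingLatencyVisitor {
 *     public:
 *      void VisitAdd(HAdd* instr ATTRIBUTE_UNUSED) override {
 *        last_visited_latency_ = kGenericInstructionLatency;
 *      }
 *    };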
 */
class SchedulingLatencyVisitor : public HGraphDelegateVisitor {
 public:
  // This class and its sub-classes will never be used to drive a visit of an
  // `HGraph` but only to visit `HInstructions` one at a time, so we do not need
  // to pass a valid graph to `HGraphDelegateVisitor()`.
  SchedulingLatencyVisitor()
      : HGraphDelegateVisitor(nullptr),
        last_visited_latency_(0),
        last_visited_internal_latency_(0) {}

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Error visiting " << instruction->DebugName() << ". "
        "Architecture-specific scheduling latency visitors must handle all instructions"
        " (potentially by overriding the generic `VisitInstruction()`).";
    UNREACHABLE();
  }

  void Visit(HInstruction* instruction) {
    instruction->Accept(this);
  }

  void CalculateLatency(SchedulingNode* node) {
    // By default nodes have no internal latency.
    last_visited_internal_latency_ = 0;
    Visit(node->GetInstruction());
  }

  uint32_t GetLastVisitedLatency() const { return last_visited_latency_; }
  uint32_t GetLastVisitedInternalLatency() const { return last_visited_internal_latency_; }

 protected:
  // The latency of the most recently visited SchedulingNode.
  // This is for reporting the latency value to the user of this visitor.
  uint32_t last_visited_latency_;
  // This represents the time spent *within* the generated code for the most recently
  // visited SchedulingNode. This is for reporting the internal latency value to the
  // user of this visitor.
  uint32_t last_visited_internal_latency_;
};

class SchedulingNodeSelector : public ArenaObject<kArenaAllocScheduler> {
 public:
  virtual void Reset() {}
  virtual SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
                                                 const SchedulingGraph& graph) = 0;
  virtual ~SchedulingNodeSelector() {}
 protected:
  static void DeleteNodeAtIndex(ScopedArenaVector<SchedulingNode*>* nodes, size_t index) {
    (*nodes)[index] = nodes->back();
    nodes->pop_back();
  }
};

/*
 * Select a `SchedulingNode` at random within the candidates.
 */
class RandomSchedulingNodeSelector : public SchedulingNodeSelector {
 public:
  RandomSchedulingNodeSelector() : seed_(0) {
    seed_ = static_cast<uint32_t>(NanoTime());
    srand(seed_);
  }

  SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
                                         const SchedulingGraph& graph) override {
    UNUSED(graph);
    DCHECK(!nodes->empty());
    size_t select = rand_r(&seed_) % nodes->size();
    SchedulingNode* select_node = (*nodes)[select];
    DeleteNodeAtIndex(nodes, select);
    return select_node;
  }

  uint32_t seed_;
};

/*
 * Select a `SchedulingNode` according to critical path information,
 * with heuristics to favor certain instruction patterns like materialized conditions.
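 * (For example, when the node selected on the previous round is an `HIf` or
 * `HSelect`, the selector tries to pick the corresponding materialized
 * condition next, so that the condition ends up immediately before its user
 * in program order; see `SelectMaterializedCondition()`.)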
 */
class CriticalPathSchedulingNodeSelector : public SchedulingNodeSelector {
 public:
  CriticalPathSchedulingNodeSelector() : prev_select_(nullptr) {}

  void Reset() override { prev_select_ = nullptr; }
  SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
                                         const SchedulingGraph& graph) override;

 protected:
  SchedulingNode* GetHigherPrioritySchedulingNode(SchedulingNode* candidate,
                                                  SchedulingNode* check) const;

  SchedulingNode* SelectMaterializedCondition(ScopedArenaVector<SchedulingNode*>* nodes,
                                              const SchedulingGraph& graph) const;

 private:
  const SchedulingNode* prev_select_;
};

class HScheduler {
 public:
  HScheduler(SchedulingLatencyVisitor* latency_visitor, SchedulingNodeSelector* selector)
      : latency_visitor_(latency_visitor),
        selector_(selector),
        only_optimize_loop_blocks_(true),
        cursor_(nullptr) {}
  virtual ~HScheduler() {}

  void Schedule(HGraph* graph);

  void SetOnlyOptimizeLoopBlocks(bool loop_only) { only_optimize_loop_blocks_ = loop_only; }

  // Instructions cannot be rescheduled across a scheduling barrier.
  virtual bool IsSchedulingBarrier(const HInstruction* instruction) const;

 protected:
  void Schedule(HBasicBlock* block, const HeapLocationCollector* heap_location_collector);
  void Schedule(SchedulingNode* scheduling_node,
                /*inout*/ ScopedArenaVector<SchedulingNode*>* candidates);
  void Schedule(HInstruction* instruction);

  // Any instruction returning `false` via this method will prevent its
  // containing basic block from being scheduled.
  // This method is used to restrict scheduling to instructions that we know are
  // safe to handle.
  //
  // By default, `HScheduler::IsSchedulable()` returns false for newly introduced
  // instructions. `HScheduler${ARCH}::IsSchedulable()` can be overridden to return
  // true for an instruction (see scheduler_arm64.h for an example) if it is safe
  // to schedule it; in this case one *must* also look at/update
  // `HScheduler${ARCH}::IsSchedulingBarrier()` for this instruction.
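  //
  // A hypothetical override (sketch only; see scheduler_arm64.h for a real one)
  // allows one additional instruction type:
  //
  //    bool HSchedulerFoo::IsSchedulable(const HInstruction* instruction) const {
  //      return instruction->IsAdd() || HScheduler::IsSchedulable(instruction);
  //    }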
  virtual bool IsSchedulable(const HInstruction* instruction) const;
  bool IsSchedulable(const HBasicBlock* block) const;

  void CalculateLatency(SchedulingNode* node) {
    latency_visitor_->CalculateLatency(node);
    node->SetLatency(latency_visitor_->GetLastVisitedLatency());
    node->SetInternalLatency(latency_visitor_->GetLastVisitedInternalLatency());
  }

  SchedulingLatencyVisitor* const latency_visitor_;
  SchedulingNodeSelector* const selector_;
  bool only_optimize_loop_blocks_;

  // A pointer indicating where the next instruction to be scheduled will be inserted.
  HInstruction* cursor_;

 private:
  DISALLOW_COPY_AND_ASSIGN(HScheduler);
};

class HInstructionScheduling : public HOptimization {
 public:
  HInstructionScheduling(HGraph* graph,
                         InstructionSet instruction_set,
                         CodeGenerator* cg = nullptr,
                         const char* name = kInstructionSchedulingPassName)
      : HOptimization(graph, name),
        codegen_(cg),
        instruction_set_(instruction_set) {}

  bool Run() override {
    return Run(/*only_optimize_loop_blocks*/ true, /*schedule_randomly*/ false);
  }

  bool Run(bool only_optimize_loop_blocks, bool schedule_randomly);

  static constexpr const char* kInstructionSchedulingPassName = "scheduler";

 private:
  CodeGenerator* const codegen_;
  const InstructionSet instruction_set_;
  DISALLOW_COPY_AND_ASSIGN(HInstructionScheduling);
};

}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_SCHEDULER_H_