// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_INSTRUCTION_SCHEDULER_H_
#define V8_COMPILER_INSTRUCTION_SCHEDULER_H_

#include "src/compiler/instruction.h"
#include "src/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// A set of flags describing properties of the instructions so that the
// scheduler is aware of dependencies between instructions.
enum ArchOpcodeFlags {
  kNoOpcodeFlags = 0,
  kIsBlockTerminator = 1,  // The instruction marks the end of a basic block
                           // e.g.: jump and return instructions.
  kHasSideEffect = 2,      // The instruction has some side effects (memory
                           // store, function call...)
  kIsLoadOperation = 4,    // The instruction is a memory load.
};

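// Note: these values are bit flags. An instruction's properties are reported
// as a bitwise OR of these values and queried with a bitwise AND, e.g.
// (GetInstructionFlags(instr) & kHasSideEffect) != 0; see the helpers in
// InstructionScheduler below.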

class InstructionScheduler final : public ZoneObject {
 public:
  InstructionScheduler(Zone* zone, InstructionSequence* sequence);

  void StartBlock(RpoNumber rpo);
  void EndBlock(RpoNumber rpo);

  void AddInstruction(Instruction* instr);

  static bool SchedulerSupported();
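
  // A minimal usage sketch (hypothetical caller; names like
  // 'block_instructions' are illustrative only):
  //
  //   if (InstructionScheduler::SchedulerSupported()) {
  //     InstructionScheduler scheduler(zone, sequence);
  //     scheduler.StartBlock(rpo);
  //     for (Instruction* instr : block_instructions)
  //       scheduler.AddInstruction(instr);
  //     scheduler.EndBlock(rpo);  // presumably emits the scheduled order
  //                               // back into 'sequence'.
  //   }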

 private:
  // A scheduling graph node.
  // Represents an instruction and its dependencies.
  class ScheduleGraphNode : public ZoneObject {
   public:
    ScheduleGraphNode(Zone* zone, Instruction* instr);

    // Mark the instruction represented by 'node' as a dependency of this one.
    // The current instruction will be registered as an unscheduled predecessor
    // of 'node' (i.e. it must be scheduled before 'node').
    void AddSuccessor(ScheduleGraphNode* node);

    // Check if all the predecessors of this instruction have been scheduled.
    bool HasUnscheduledPredecessor() {
      return unscheduled_predecessors_count_ != 0;
    }

    // Record that we have scheduled one of the predecessors of this node.
    void DropUnscheduledPredecessor() {
      DCHECK(unscheduled_predecessors_count_ > 0);
      unscheduled_predecessors_count_--;
    }

    Instruction* instruction() { return instr_; }
    ZoneDeque<ScheduleGraphNode*>& successors() { return successors_; }
    int latency() const { return latency_; }

    int total_latency() const { return total_latency_; }
    void set_total_latency(int latency) { total_latency_ = latency; }

    int start_cycle() const { return start_cycle_; }
    void set_start_cycle(int start_cycle) { start_cycle_ = start_cycle; }

   private:
    Instruction* instr_;
    ZoneDeque<ScheduleGraphNode*> successors_;

    // Number of unscheduled predecessors for this node.
    int unscheduled_predecessors_count_;

    // Estimate of the instruction latency (the number of cycles it takes for
    // the instruction to complete).
    int latency_;

    // The sum of all the latencies on the path from this node to the end of
    // the graph (i.e. a node with no successor).
    int total_latency_;

    // The scheduler keeps a nominal cycle count to keep track of when the
    // result of an instruction is available. This field is updated by the
    // scheduler to indicate when the value of all the operands of this
    // instruction will be available.
    int start_cycle_;
  };

  // Keep track of all nodes ready to be scheduled (i.e. all their dependencies
  // have been scheduled). Note that this class is intended to be extended by
  // concrete implementations of the scheduling queue which define the policy
  // to pop nodes from the queue.
  class SchedulingQueueBase {
   public:
    explicit SchedulingQueueBase(InstructionScheduler* scheduler)
      : scheduler_(scheduler),
        nodes_(scheduler->zone()) {
    }

    void AddNode(ScheduleGraphNode* node) {
      nodes_.push_back(node);
    }

    bool IsEmpty() const {
      return nodes_.empty();
    }

   protected:
    InstructionScheduler* scheduler_;
    ZoneLinkedList<ScheduleGraphNode*> nodes_;
  };

  // A scheduling queue which prioritizes nodes on the critical path (we look
  // for the instruction with the highest latency on the path to reach the end
  // of the graph).
  class CriticalPathFirstQueue : public SchedulingQueueBase {
   public:
    explicit CriticalPathFirstQueue(InstructionScheduler* scheduler)
      : SchedulingQueueBase(scheduler) { }

    // Look for the best candidate to schedule, remove it from the queue and
    // return it.
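    // A rough sketch of the intended policy (illustrative only; the actual
    // implementation lives in instruction-scheduler.cc and may differ):
    //
    //   ScheduleGraphNode* best = nullptr;
    //   for (ScheduleGraphNode* node : nodes_) {
    //     // Only consider nodes whose operands are ready at 'cycle'.
    //     if (node->start_cycle() <= cycle &&
    //         (best == nullptr || CompareNodes(node, best))) {
    //       best = node;
    //     }
    //   }
    //   // Remove 'best' from nodes_ and return it (or nullptr if none).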
    ScheduleGraphNode* PopBestCandidate(int cycle);

   private:
    // Compare the two nodes and return true if node1 is a better candidate
    // than node2 (i.e. node1 should be scheduled before node2).
    bool CompareNodes(ScheduleGraphNode* node1, ScheduleGraphNode* node2) const;
  };

  // A queue which pops a random node from the queue to perform stress tests
  // on the scheduler.
  class StressSchedulerQueue : public SchedulingQueueBase {
   public:
    explicit StressSchedulerQueue(InstructionScheduler* scheduler)
      : SchedulingQueueBase(scheduler) { }

    ScheduleGraphNode* PopBestCandidate(int cycle);

   private:
    Isolate* isolate() {
      return scheduler_->isolate();
    }
  };

  // Perform scheduling for the current block specifying the queue type to
  // use to determine the next best candidate.
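  // (Presumably instantiated with CriticalPathFirstQueue in normal use and
  // with StressSchedulerQueue when stress-testing the scheduler.)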
  template <typename QueueType>
  void ScheduleBlock();

  // Return the scheduling properties of the given instruction.
  int GetInstructionFlags(const Instruction* instr) const;
  int GetTargetInstructionFlags(const Instruction* instr) const;

  // Return true if instr2 uses any value defined by instr1.
  bool HasOperandDependency(const Instruction* instr1,
                            const Instruction* instr2) const;

  // Return true if the instruction is a basic block terminator.
  bool IsBlockTerminator(const Instruction* instr) const;

  // Check whether the given instruction has side effects (e.g. function call,
  // memory store).
  bool HasSideEffect(const Instruction* instr) const {
    return GetInstructionFlags(instr) & kHasSideEffect;
  }

  // Return true if the instruction is a memory load.
  bool IsLoadOperation(const Instruction* instr) const {
    return GetInstructionFlags(instr) & kIsLoadOperation;
  }

  // Identify nops used as a definition point for live-in registers at
  // function entry.
  bool IsFixedRegisterParameter(const Instruction* instr) const {
    return (instr->arch_opcode() == kArchNop) && (instr->OutputCount() == 1) &&
           (instr->OutputAt(0)->IsUnallocated()) &&
           (UnallocatedOperand::cast(instr->OutputAt(0))
                ->HasFixedRegisterPolicy() ||
            UnallocatedOperand::cast(instr->OutputAt(0))
                ->HasFixedFPRegisterPolicy());
  }

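  // Compute total_latency() for every node by propagating latencies backwards
  // from the end of the graph (see ScheduleGraphNode::total_latency_).
  // Illustrative example (made-up latencies): for a dependency chain
  // load(3) -> add(1) -> store(1), the totals would be 5, 2 and 1
  // respectively, so CriticalPathFirstQueue schedules the load as early as
  // possible.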
  void ComputeTotalLatencies();

  static int GetInstructionLatency(const Instruction* instr);

  Zone* zone() { return zone_; }
  InstructionSequence* sequence() { return sequence_; }
  Isolate* isolate() { return sequence()->isolate(); }

  Zone* zone_;
  InstructionSequence* sequence_;
  ZoneVector<ScheduleGraphNode*> graph_;

  // Last side effect instruction encountered while building the graph.
  ScheduleGraphNode* last_side_effect_instr_;

  // Set of load instructions encountered since the last side effect instruction
  // which will be added as predecessors of the next instruction with side
  // effects.
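  // Illustrative example of the intended ordering constraints: in the
  // sequence store1; load1; load2; store2, both loads get store1 as a
  // predecessor and are recorded here so that they become predecessors of
  // store2. The loads may then be reordered relative to each other, but not
  // across either store.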
  ZoneVector<ScheduleGraphNode*> pending_loads_;

  // Live-in register markers are nop instructions which are emitted at the
  // beginning of a basic block so that the register allocator will find a
  // defining instruction for live-in values. They must not be moved.
  // All these nops are chained together and added as a predecessor of every
  // other instruction in the basic block.
  ScheduleGraphNode* last_live_in_reg_marker_;

  // Last deoptimization instruction encountered while building the graph.
  ScheduleGraphNode* last_deopt_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_INSTRUCTION_SCHEDULER_H_