// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_INSTRUCTION_SCHEDULER_H_
#define V8_COMPILER_INSTRUCTION_SCHEDULER_H_

#include "src/compiler/instruction.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

15 // A set of flags describing properties of the instructions so that the
16 // scheduler is aware of dependencies between instructions.
17 enum ArchOpcodeFlags {
18   kNoOpcodeFlags = 0,
19   kIsBlockTerminator = 1,  // The instruction marks the end of a basic block
20                            // e.g.: jump and return instructions.
21   kHasSideEffect = 2,      // The instruction has some side effects (memory
22                            // store, function call...)
23   kIsLoadOperation = 4,    // The instruction is a memory load.
24   kMayNeedDeoptCheck = 8,  // The instruction might be associated with a deopt
25                            // check. This is the case of instruction which can
26                            // blow up with particular inputs (e.g.: division by
27                            // zero on Intel platforms).
28 };
29 
30 class InstructionScheduler final : public ZoneObject {
31  public:
32   InstructionScheduler(Zone* zone, InstructionSequence* sequence);
33 
34   void StartBlock(RpoNumber rpo);
35   void EndBlock(RpoNumber rpo);
36 
37   void AddInstruction(Instruction* instr);
38 
39   static bool SchedulerSupported();
40 
41  private:
42   // A scheduling graph node.
43   // Represent an instruction and their dependencies.
44   class ScheduleGraphNode: public ZoneObject {
45    public:
46     ScheduleGraphNode(Zone* zone, Instruction* instr);
47 
48     // Mark the instruction represented by 'node' as a dependecy of this one.
49     // The current instruction will be registered as an unscheduled predecessor
50     // of 'node' (i.e. it must be scheduled before 'node').
51     void AddSuccessor(ScheduleGraphNode* node);
52 
53     // Check if all the predecessors of this instruction have been scheduled.
HasUnscheduledPredecessor()54     bool HasUnscheduledPredecessor() {
55       return unscheduled_predecessors_count_ != 0;
56     }
57 
58     // Record that we have scheduled one of the predecessors of this node.
DropUnscheduledPredecessor()59     void DropUnscheduledPredecessor() {
60       DCHECK(unscheduled_predecessors_count_ > 0);
61       unscheduled_predecessors_count_--;
62     }
63 
instruction()64     Instruction* instruction() { return instr_; }
successors()65     ZoneDeque<ScheduleGraphNode*>& successors() { return successors_; }
latency()66     int latency() const { return latency_; }
67 
total_latency()68     int total_latency() const { return total_latency_; }
set_total_latency(int latency)69     void set_total_latency(int latency) { total_latency_ = latency; }
70 
start_cycle()71     int start_cycle() const { return start_cycle_; }
set_start_cycle(int start_cycle)72     void set_start_cycle(int start_cycle) { start_cycle_ = start_cycle; }
73 
74    private:
75     Instruction* instr_;
76     ZoneDeque<ScheduleGraphNode*> successors_;
77 
78     // Number of unscheduled predecessors for this node.
79     int unscheduled_predecessors_count_;
80 
81     // Estimate of the instruction latency (the number of cycles it takes for
82     // instruction to complete).
83     int latency_;
84 
85     // The sum of all the latencies on the path from this node to the end of
86     // the graph (i.e. a node with no successor).
87     int total_latency_;
88 
89     // The scheduler keeps a nominal cycle count to keep track of when the
90     // result of an instruction is available. This field is updated by the
91     // scheduler to indicate when the value of all the operands of this
92     // instruction will be available.
93     int start_cycle_;
94   };
95 
96   // Keep track of all nodes ready to be scheduled (i.e. all their dependencies
97   // have been scheduled. Note that this class is inteded to be extended by
98   // concrete implementation of the scheduling queue which define the policy
99   // to pop node from the queue.
100   class SchedulingQueueBase {
101    public:
SchedulingQueueBase(InstructionScheduler * scheduler)102     explicit SchedulingQueueBase(InstructionScheduler* scheduler)
103       : scheduler_(scheduler),
104         nodes_(scheduler->zone()) {
105     }
106 
107     void AddNode(ScheduleGraphNode* node);
108 
IsEmpty()109     bool IsEmpty() const {
110       return nodes_.empty();
111     }
112 
113    protected:
114     InstructionScheduler* scheduler_;
115     ZoneLinkedList<ScheduleGraphNode*> nodes_;
116   };
117 
118   // A scheduling queue which prioritize nodes on the critical path (we look
119   // for the instruction with the highest latency on the path to reach the end
120   // of the graph).
121   class CriticalPathFirstQueue : public SchedulingQueueBase  {
122    public:
CriticalPathFirstQueue(InstructionScheduler * scheduler)123     explicit CriticalPathFirstQueue(InstructionScheduler* scheduler)
124       : SchedulingQueueBase(scheduler) { }
125 
126     // Look for the best candidate to schedule, remove it from the queue and
127     // return it.
128     ScheduleGraphNode* PopBestCandidate(int cycle);
129   };
130 
131   // A queue which pop a random node from the queue to perform stress tests on
132   // the scheduler.
133   class StressSchedulerQueue : public SchedulingQueueBase  {
134    public:
StressSchedulerQueue(InstructionScheduler * scheduler)135     explicit StressSchedulerQueue(InstructionScheduler* scheduler)
136       : SchedulingQueueBase(scheduler) { }
137 
138     ScheduleGraphNode* PopBestCandidate(int cycle);
139 
140    private:
isolate()141     Isolate *isolate() {
142       return scheduler_->isolate();
143     }
144   };
145 
146   // Perform scheduling for the current block specifying the queue type to
147   // use to determine the next best candidate.
148   template <typename QueueType>
149   void ScheduleBlock();
150 
151   // Return the scheduling properties of the given instruction.
152   int GetInstructionFlags(const Instruction* instr) const;
153   int GetTargetInstructionFlags(const Instruction* instr) const;
154 
155   // Return true if the instruction is a basic block terminator.
156   bool IsBlockTerminator(const Instruction* instr) const;
157 
158   // Check whether the given instruction has side effects (e.g. function call,
159   // memory store).
HasSideEffect(const Instruction * instr)160   bool HasSideEffect(const Instruction* instr) const {
161     return (GetInstructionFlags(instr) & kHasSideEffect) != 0;
162   }
163 
164   // Return true if the instruction is a memory load.
IsLoadOperation(const Instruction * instr)165   bool IsLoadOperation(const Instruction* instr) const {
166     return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
167   }
168 
169   // Return true if this instruction is usually associated with a deopt check
170   // to validate its input.
MayNeedDeoptCheck(const Instruction * instr)171   bool MayNeedDeoptCheck(const Instruction* instr) const {
172     return (GetInstructionFlags(instr) & kMayNeedDeoptCheck) != 0;
173   }
174 
175   // Return true if the instruction cannot be moved before the last deopt
176   // point we encountered.
DependsOnDeoptimization(const Instruction * instr)177   bool DependsOnDeoptimization(const Instruction* instr) const {
178     return MayNeedDeoptCheck(instr) || instr->IsDeoptimizeCall() ||
179            HasSideEffect(instr) || IsLoadOperation(instr);
180   }
181 
182   // Identify nops used as a definition point for live-in registers at
183   // function entry.
IsFixedRegisterParameter(const Instruction * instr)184   bool IsFixedRegisterParameter(const Instruction* instr) const {
185     return (instr->arch_opcode() == kArchNop) && (instr->OutputCount() == 1) &&
186            (instr->OutputAt(0)->IsUnallocated()) &&
187            (UnallocatedOperand::cast(instr->OutputAt(0))
188                 ->HasFixedRegisterPolicy() ||
189             UnallocatedOperand::cast(instr->OutputAt(0))
190                 ->HasFixedFPRegisterPolicy());
191   }
192 
193   void ComputeTotalLatencies();
194 
195   static int GetInstructionLatency(const Instruction* instr);
196 
zone()197   Zone* zone() { return zone_; }
sequence()198   InstructionSequence* sequence() { return sequence_; }
isolate()199   Isolate* isolate() { return sequence()->isolate(); }
200 
201   Zone* zone_;
202   InstructionSequence* sequence_;
203   ZoneVector<ScheduleGraphNode*> graph_;
204 
205   // Last side effect instruction encountered while building the graph.
206   ScheduleGraphNode* last_side_effect_instr_;
207 
208   // Set of load instructions encountered since the last side effect instruction
209   // which will be added as predecessors of the next instruction with side
210   // effects.
211   ZoneVector<ScheduleGraphNode*> pending_loads_;
212 
213   // Live-in register markers are nop instructions which are emitted at the
214   // beginning of a basic block so that the register allocator will find a
215   // defining instruction for live-in values. They must not be moved.
216   // All these nops are chained together and added as a predecessor of every
217   // other instructions in the basic block.
218   ScheduleGraphNode* last_live_in_reg_marker_;
219 
220   // Last deoptimization instruction encountered while building the graph.
221   ScheduleGraphNode* last_deopt_;
222 
223   // Keep track of definition points for virtual registers. This is used to
224   // record operand dependencies in the scheduling graph.
225   ZoneMap<int32_t, ScheduleGraphNode*> operands_map_;
226 };

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_INSTRUCTION_SCHEDULER_H_