// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_
#define V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_

#include "src/base/optional.h"
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/backend/instruction.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// A set of flags describing properties of the instructions so that the
// scheduler is aware of dependencies between instructions.
enum ArchOpcodeFlags {
  kNoOpcodeFlags = 0,
  kHasSideEffect = 1,    // The instruction has some side effects (memory
                         // store, function call...)
  kIsLoadOperation = 2,  // The instruction is a memory load.
  kMayNeedDeoptOrTrapCheck = 4,  // The instruction may be associated with a
                                 // deopt or trap check which must be run before
                                 // the instruction, e.g. div on Intel platforms,
                                 // which raises an exception when the divisor
                                 // is zero.
  kIsBarrier = 8,  // The instruction can cause GC or it reads/writes registers
                   // that are not explicitly given. Nothing can be reordered
                   // across such an instruction.
};
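
// These flags are combined per opcode by each backend's
// GetTargetInstructionFlags(). A minimal sketch of such a combination (the
// opcode name below is hypothetical and not part of this header):
//
//   switch (instr->arch_opcode()) {
//     case kHypotheticalProtectedLoad:
//       // A load that may fault: it is both a load and a potential trap.
//       return kIsLoadOperation | kMayNeedDeoptOrTrapCheck;
//     default:
//       return kNoOpcodeFlags;
//   }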

class InstructionScheduler final : public ZoneObject {
 public:
  V8_EXPORT_PRIVATE InstructionScheduler(Zone* zone,
                                         InstructionSequence* sequence);

  V8_EXPORT_PRIVATE void StartBlock(RpoNumber rpo);
  V8_EXPORT_PRIVATE void EndBlock(RpoNumber rpo);

  V8_EXPORT_PRIVATE void AddInstruction(Instruction* instr);
  V8_EXPORT_PRIVATE void AddTerminator(Instruction* instr);

  static bool SchedulerSupported();
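
  // Typical use, as a sketch based on the declarations above (how a caller
  // obtains the block's instructions is an assumption, not part of this API):
  //
  //   InstructionScheduler scheduler(zone, sequence);
  //   scheduler.StartBlock(rpo);
  //   for (Instruction* instr : non_terminator_instructions) {
  //     scheduler.AddInstruction(instr);
  //   }
  //   scheduler.AddTerminator(terminator_instruction);
  //   scheduler.EndBlock(rpo);  // Schedules the instructions added above.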

 private:
  // A scheduling graph node.
  // Represents an instruction and its dependencies.
  class ScheduleGraphNode : public ZoneObject {
   public:
    ScheduleGraphNode(Zone* zone, Instruction* instr);

    // Mark the instruction represented by 'node' as a dependency of this one.
    // The current instruction will be registered as an unscheduled predecessor
    // of 'node' (i.e. it must be scheduled before 'node').
    void AddSuccessor(ScheduleGraphNode* node);

    // Check if all the predecessors of this instruction have been scheduled.
    bool HasUnscheduledPredecessor() {
      return unscheduled_predecessors_count_ != 0;
    }

    // Record that we have scheduled one of the predecessors of this node.
    void DropUnscheduledPredecessor() {
      DCHECK_LT(0, unscheduled_predecessors_count_);
      unscheduled_predecessors_count_--;
    }

    Instruction* instruction() { return instr_; }
    ZoneDeque<ScheduleGraphNode*>& successors() { return successors_; }
    int latency() const { return latency_; }

    int total_latency() const { return total_latency_; }
    void set_total_latency(int latency) { total_latency_ = latency; }

    int start_cycle() const { return start_cycle_; }
    void set_start_cycle(int start_cycle) { start_cycle_ = start_cycle; }

   private:
    Instruction* instr_;
    ZoneDeque<ScheduleGraphNode*> successors_;

    // Number of unscheduled predecessors for this node.
    int unscheduled_predecessors_count_;

    // Estimate of the instruction latency (the number of cycles it takes for
    // the instruction to complete).
    int latency_;

    // The sum of all the latencies on the path from this node to the end of
    // the graph (i.e. a node with no successor).
    int total_latency_;

    // The scheduler keeps a nominal cycle count to keep track of when the
    // result of an instruction is available. This field is updated by the
    // scheduler to indicate when the value of all the operands of this
    // instruction will be available.
    int start_cycle_;
  };
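
  // A sketch of how the node API above is used when building the graph (the
  // node names are illustrative only): if instruction A must be scheduled
  // before instruction B, the graph builder calls
  //
  //   a_node->AddSuccessor(b_node);
  //
  // which records a_node as an unscheduled predecessor of b_node. b_node only
  // becomes ready once DropUnscheduledPredecessor() has been called for each
  // of its predecessors, i.e. when HasUnscheduledPredecessor() returns false.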

  // Keep track of all nodes ready to be scheduled (i.e. all their dependencies
  // have been scheduled). Note that this class is intended to be extended by
  // concrete implementations of the scheduling queue, which define the policy
  // used to pop a node from the queue.
  class SchedulingQueueBase {
   public:
    explicit SchedulingQueueBase(InstructionScheduler* scheduler)
        : scheduler_(scheduler), nodes_(scheduler->zone()) {}

    void AddNode(ScheduleGraphNode* node);

    bool IsEmpty() const { return nodes_.empty(); }

   protected:
    InstructionScheduler* scheduler_;
    ZoneLinkedList<ScheduleGraphNode*> nodes_;
  };

  // A scheduling queue which prioritizes nodes on the critical path (we look
  // for the instruction with the highest latency on the path to the end of
  // the graph).
  class CriticalPathFirstQueue : public SchedulingQueueBase {
   public:
    explicit CriticalPathFirstQueue(InstructionScheduler* scheduler)
        : SchedulingQueueBase(scheduler) {}

    // Look for the best candidate to schedule, remove it from the queue and
    // return it.
    ScheduleGraphNode* PopBestCandidate(int cycle);
  };
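
  // A worked example of the critical-path heuristic (illustrative numbers,
  // not taken from any target): given a chain A -> B -> C with latencies
  // A=1, B=3, C=1 and no other successors, ComputeTotalLatencies() yields
  // total latencies C=1, B=3+1=4 and A=1+4=5, so among ready nodes this
  // queue pops A first, since it heads the longest remaining path.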

  // A queue which pops a random node from the queue, used to perform stress
  // tests on the scheduler.
  class StressSchedulerQueue : public SchedulingQueueBase {
   public:
    explicit StressSchedulerQueue(InstructionScheduler* scheduler)
        : SchedulingQueueBase(scheduler) {}

    ScheduleGraphNode* PopBestCandidate(int cycle);

   private:
    base::RandomNumberGenerator* random_number_generator() {
      return scheduler_->random_number_generator();
    }
  };

  // Perform scheduling for the current block specifying the queue type to
  // use to determine the next best candidate.
  template <typename QueueType>
  void Schedule();

  // Return the scheduling properties of the given instruction.
  V8_EXPORT_PRIVATE int GetInstructionFlags(const Instruction* instr) const;
  int GetTargetInstructionFlags(const Instruction* instr) const;

  bool IsBarrier(const Instruction* instr) const {
    return (GetInstructionFlags(instr) & kIsBarrier) != 0;
  }

  // Check whether the given instruction has side effects (e.g. function call,
  // memory store).
  bool HasSideEffect(const Instruction* instr) const {
    return (GetInstructionFlags(instr) & kHasSideEffect) != 0;
  }

  // Return true if the instruction is a memory load.
  bool IsLoadOperation(const Instruction* instr) const {
    return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
  }

  bool CanTrap(const Instruction* instr) const {
    return instr->IsTrap() ||
           (instr->HasMemoryAccessMode() &&
            instr->memory_access_mode() == kMemoryAccessProtected);
  }

  // The scheduler will not move the following instructions before the last
  // deopt/trap check:
  //  * loads (this is conservative)
  //  * instructions with side effects
  //  * other deopts/traps
  // Any other instruction can be moved, apart from those that raise exceptions
  // on specific inputs - these are filtered out by the deopt/trap check.
  bool MayNeedDeoptOrTrapCheck(const Instruction* instr) const {
    return (GetInstructionFlags(instr) & kMayNeedDeoptOrTrapCheck) != 0;
  }

  // Return true if the instruction cannot be moved before the last deopt or
  // trap point we encountered.
  bool DependsOnDeoptOrTrap(const Instruction* instr) const {
    return MayNeedDeoptOrTrapCheck(instr) || instr->IsDeoptimizeCall() ||
           CanTrap(instr) || HasSideEffect(instr) || IsLoadOperation(instr);
  }

  // Identify nops used as a definition point for live-in registers at
  // function entry.
  bool IsFixedRegisterParameter(const Instruction* instr) const {
    return (instr->arch_opcode() == kArchNop) && (instr->OutputCount() == 1) &&
           (instr->OutputAt(0)->IsUnallocated()) &&
           (UnallocatedOperand::cast(instr->OutputAt(0))
                ->HasFixedRegisterPolicy() ||
            UnallocatedOperand::cast(instr->OutputAt(0))
                ->HasFixedFPRegisterPolicy());
  }

  void ComputeTotalLatencies();

  static int GetInstructionLatency(const Instruction* instr);

  Zone* zone() { return zone_; }
  InstructionSequence* sequence() { return sequence_; }
  base::RandomNumberGenerator* random_number_generator() {
    return &random_number_generator_.value();
  }

  Zone* zone_;
  InstructionSequence* sequence_;
  ZoneVector<ScheduleGraphNode*> graph_;

  friend class InstructionSchedulerTester;

  // Last side effect instruction encountered while building the graph.
  ScheduleGraphNode* last_side_effect_instr_;

  // Set of load instructions encountered since the last side effect
  // instruction which will be added as predecessors of the next instruction
  // with side effects.
  ZoneVector<ScheduleGraphNode*> pending_loads_;

  // Live-in register markers are nop instructions which are emitted at the
  // beginning of a basic block so that the register allocator will find a
  // defining instruction for live-in values. They must not be moved.
  // All these nops are chained together and added as a predecessor of every
  // other instruction in the basic block.
  ScheduleGraphNode* last_live_in_reg_marker_;

  // Last deoptimization or trap instruction encountered while building the
  // graph.
  ScheduleGraphNode* last_deopt_or_trap_;

  // Keep track of definition points for virtual registers. This is used to
  // record operand dependencies in the scheduling graph.
  ZoneMap<int32_t, ScheduleGraphNode*> operands_map_;

  base::Optional<base::RandomNumberGenerator> random_number_generator_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_