// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_INSTRUCTION_SELECTOR_H_

#include <map>

#include "src/compiler/common-operator.h"
#include "src/compiler/instruction-scheduler.h"
#include "src/compiler/instruction.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/globals.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// Forward declarations.
class BasicBlock;
struct CallBuffer;  // TODO(bmeurer): Remove this.
class FlagsContinuation;
class Linkage;
class OperandGenerator;
struct SwitchInfo;
class StateObjectDeduplicator;

// This class associates the parameter nodes that are going to be pushed on
// the call stack with their parameter index in the call descriptor of the
// callee.
class PushParameter {
 public:
  PushParameter() : node_(nullptr), type_(MachineType::None()) {}
  PushParameter(Node* node, MachineType type) : node_(node), type_(type) {}

  Node* node() const { return node_; }
  MachineType type() const { return type_; }

 private:
  Node* node_;
  MachineType type_;
};

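// How a value referenced from a frame state may be materialized as an
// instruction input (assumed semantics, inferred from the selector's
// frame-state handling): kAny places no restriction on the operand, while
// kStackSlot forces the value into a stack slot.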
enum class FrameStateInputKind { kAny, kStackSlot };

// Instruction selection generates an InstructionSequence for a given Schedule.
class V8_EXPORT_PRIVATE InstructionSelector final {
 public:
  // Forward declarations.
  class Features;

  enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
  enum EnableScheduling { kDisableScheduling, kEnableScheduling };
  enum EnableSerialization { kDisableSerialization, kEnableSerialization };

  InstructionSelector(
      Zone* zone, size_t node_count, Linkage* linkage,
      InstructionSequence* sequence, Schedule* schedule,
      SourcePositionTable* source_positions, Frame* frame,
      SourcePositionMode source_position_mode = kCallSourcePositions,
      Features features = SupportedFeatures(),
      EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
                                               ? kEnableScheduling
                                               : kDisableScheduling,
      EnableSerialization enable_serialization = kDisableSerialization);

  // Visit code for the entire graph with the included schedule.
  bool SelectInstructions();
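
  // Illustrative sketch (not part of the original header) of how a caller
  // such as the compilation pipeline might drive the selector; the variable
  // names here are assumptions:
  //
  //   InstructionSelector selector(zone, graph->NodeCount(), linkage,
  //                                sequence, schedule, source_positions,
  //                                frame);
  //   if (!selector.SelectInstructions()) {
  //     // Bail out; instruction selection failed.
  //   }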

  void StartBlock(RpoNumber rpo);
  void EndBlock(RpoNumber rpo);
  void AddInstruction(Instruction* instr);

  // ===========================================================================
  // ============= Architecture-independent code emission methods. =============
  // ===========================================================================

  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, InstructionOperand f,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, size_t output_count,
                    InstructionOperand* outputs, size_t input_count,
                    InstructionOperand* inputs, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(Instruction* instr);
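
  // Illustrative sketch (not part of the original header): visitors pair an
  // OperandGenerator with Emit. For instance, lowering a node to a plain
  // register-to-register move could look roughly like this, where {node} and
  // {value} are assumptions:
  //
  //   OperandGenerator g(this);
  //   Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));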

  // ===========================================================================
  // ===== Architecture-independent deoptimization exit emission methods. ======
  // ===========================================================================

  Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
                              InstructionOperand a, DeoptimizeKind kind,
                              DeoptimizeReason reason, Node* frame_state);
  Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
                              InstructionOperand a, InstructionOperand b,
                              DeoptimizeKind kind, DeoptimizeReason reason,
                              Node* frame_state);
  Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
                              InstructionOperand* outputs, size_t input_count,
                              InstructionOperand* inputs, DeoptimizeKind kind,
                              DeoptimizeReason reason, Node* frame_state);

  // ===========================================================================
  // ============== Architecture-independent CPU feature methods. ==============
  // ===========================================================================

  class Features final {
   public:
    Features() : bits_(0) {}
    explicit Features(unsigned bits) : bits_(bits) {}
    explicit Features(CpuFeature f) : bits_(1u << f) {}
    Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}

    bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }

   private:
    unsigned bits_;
  };

  bool IsSupported(CpuFeature feature) const {
    return features_.Contains(feature);
  }
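
  // For example (sketch), an x64 backend might guard an AVX-only code path:
  //
  //   if (IsSupported(AVX)) {
  //     // ... emit the VEX-encoded form ...
  //   }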

  // Returns the features supported on the target platform.
  static Features SupportedFeatures() {
    return Features(CpuFeatures::SupportedFeatures());
  }

  // TODO(sigurds) This should take a CpuFeatures argument.
  static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();

  static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();

  // ===========================================================================
  // ============ Architecture-independent graph covering methods. =============
  // ===========================================================================

  // Used in pattern matching during code generation.
  // Check if {node} can be covered while generating code for the current
  // instruction. A node can be covered if {user} holds its only use edge and
  // the two are in the same basic block.
  bool CanCover(Node* user, Node* node) const;

  // Used in pattern matching during code generation.
  // This function checks that {node} and {user} are in the same basic block,
  // and that {user} is the only user of {node} in this basic block.  This
  // check guarantees that there are no users of {node} scheduled between
  // {node} and {user}, and thus we can select a single instruction for both
  // nodes, if such an instruction exists. This check can be used for example
  // when selecting instructions for:
  //   n = Int32Add(a, b)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // Here we can generate a flag-setting add instruction, even if the add has
  // uses in other basic blocks, since the flag-setting add instruction will
  // still generate the result of the addition and not just set the flags.
  // However, if we had uses of the add in the same basic block, we could have:
  //   n = Int32Add(a, b)
  //   o = OtherOp(n, ...)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // where we cannot select the add and the compare together.  If we were to
  // select a flag-setting add instruction for Word32Compare and Int32Add while
  // visiting Word32Compare, we would then have to select an instruction for
  // OtherOp *afterwards*, which means we would attempt to use the result of
  // the add before we have defined it.
  bool IsOnlyUserOfNodeInSameBlock(Node* user, Node* node) const;

  // Checks if {node} was already defined, and therefore code was already
  // generated for it.
  bool IsDefined(Node* node) const;

  // Checks if {node} has any uses, and therefore code has to be generated for
  // it.
  bool IsUsed(Node* node) const;

  // Checks if {node} is currently live.
  bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }

  // Gets the effect level of {node}.
  int GetEffectLevel(Node* node) const;

  int GetVirtualRegister(const Node* node);
  const std::map<NodeId, int> GetVirtualRegistersForTesting() const;

  // Check if we can generate loads and stores of ExternalConstants relative
  // to the roots register, i.e. if both a root register is available for this
  // compilation unit and the serializer is disabled.
  bool CanAddressRelativeToRootsRegister() const;
  // Check if we can use the roots register to access GC roots.
  bool CanUseRootsRegister() const;

  Isolate* isolate() const { return sequence()->isolate(); }

 private:
  friend class OperandGenerator;

  bool UseInstructionScheduling() const {
    return (enable_scheduling_ == kEnableScheduling) &&
           InstructionScheduler::SchedulerSupported();
  }

  void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
  void EmitLookupSwitch(const SwitchInfo& sw,
                        InstructionOperand& value_operand);

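  // Virtual-register renaming support (summary inferred from the method names
  // and their uses): when selection replaces a node by one of its inputs (see
  // EmitIdentity below), the node's virtual register is renamed, and
  // already-emitted instructions and phis are updated to use the new register.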
  void TryRename(InstructionOperand* op);
  int GetRename(int virtual_register);
  void SetRename(const Node* node, const Node* rename);
  void UpdateRenames(Instruction* instruction);
  void UpdateRenamesInPhi(PhiInstruction* phi);

  // Inform the instruction selection that {node} was just defined.
  void MarkAsDefined(Node* node);

  // Inform the instruction selection that {node} has at least one use and we
  // will need to generate code for it.
  void MarkAsUsed(Node* node);

  // Sets the effect level of {node}.
  void SetEffectLevel(Node* node, int effect_level);

  // Inform the register allocation of the representation of the value produced
  // by {node}.
  void MarkAsRepresentation(MachineRepresentation rep, Node* node);
  void MarkAsWord32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord32, node);
  }
  void MarkAsWord64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord64, node);
  }
  void MarkAsFloat32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat32, node);
  }
  void MarkAsFloat64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat64, node);
  }
  void MarkAsSimd128(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kSimd128, node);
  }
  void MarkAsSimd1x4(Node* node) {
    if (kSimdMaskRegisters) {
      MarkAsRepresentation(MachineRepresentation::kSimd1x4, node);
    } else {
      MarkAsSimd128(node);
    }
  }
  void MarkAsSimd1x8(Node* node) {
    if (kSimdMaskRegisters) {
      MarkAsRepresentation(MachineRepresentation::kSimd1x8, node);
    } else {
      MarkAsSimd128(node);
    }
  }
  void MarkAsSimd1x16(Node* node) {
    if (kSimdMaskRegisters) {
      MarkAsRepresentation(MachineRepresentation::kSimd1x16, node);
    } else {
      MarkAsSimd128(node);
    }
  }
  void MarkAsReference(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kTagged, node);
  }

  // Inform the register allocation of the representation of the unallocated
  // operand {op}.
  void MarkAsRepresentation(MachineRepresentation rep,
                            const InstructionOperand& op);

  enum CallBufferFlag {
    kCallCodeImmediate = 1u << 0,
    kCallAddressImmediate = 1u << 1,
    kCallTail = 1u << 2
  };
  typedef base::Flags<CallBufferFlag> CallBufferFlags;
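
  // The flags combine bitwise; an illustrative (assumed) use:
  //
  //   CallBufferFlags flags(kCallCodeImmediate | kCallAddressImmediate);
  //   InitializeCallBuffer(call, &buffer, flags);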

  // Initialize the call buffer with the InstructionOperands, nodes, etc.,
  // corresponding to the inputs and outputs of the call.
  // Pass {kCallCodeImmediate} to generate immediate operands to calls of code
  // and {kCallAddressImmediate} to generate immediate operands to address
  // calls.
  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                            CallBufferFlags flags, int stack_slot_delta = 0);
  bool IsTailCallAddressImmediate();
  int GetTempsCountForTailCallFromJSFunction();

  FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
  size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
                                         Node* state, OperandGenerator* g,
                                         StateObjectDeduplicator* deduplicator,
                                         InstructionOperandVector* inputs,
                                         FrameStateInputKind kind, Zone* zone);
  size_t AddOperandToStateValueDescriptor(StateValueList* values,
                                          InstructionOperandVector* inputs,
                                          OperandGenerator* g,
                                          StateObjectDeduplicator* deduplicator,
                                          Node* input, MachineType type,
                                          FrameStateInputKind kind, Zone* zone);

  // ===========================================================================
  // ============= Architecture-specific graph covering methods. ===============
  // ===========================================================================

  // Visit nodes in the given block and generate code.
  void VisitBlock(BasicBlock* block);

  // Visit the node for the control flow at the end of the block, generating
  // code if necessary.
  void VisitControl(BasicBlock* block);

  // Visit the node and generate code, if any.
  void VisitNode(Node* node);

  // Visit the node and generate code for IEEE 754 functions.
  void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
  void VisitFloat64Ieee754Unop(Node*, InstructionCode code);

#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
  MACHINE_OP_LIST(DECLARE_GENERATOR)
  MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
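
  // DECLARE_GENERATOR expands to one visitor declaration per machine operator;
  // for example, MACHINE_OP_LIST yields, among many others:
  //
  //   void VisitInt32Add(Node* node);
  //   void VisitWord32And(Node* node);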

  void VisitFinishRegion(Node* node);
  void VisitParameter(Node* node);
  void VisitIfException(Node* node);
  void VisitOsrValue(Node* node);
  void VisitPhi(Node* node);
  void VisitProjection(Node* node);
  void VisitConstant(Node* node);
  void VisitCall(Node* call, BasicBlock* handler = nullptr);
  void VisitDeoptimizeIf(Node* node);
  void VisitDeoptimizeUnless(Node* node);
  void VisitTrapIf(Node* node, Runtime::FunctionId func_id);
  void VisitTrapUnless(Node* node, Runtime::FunctionId func_id);
  void VisitTailCall(Node* call);
  void VisitGoto(BasicBlock* target);
  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
  void VisitSwitch(Node* node, const SwitchInfo& sw);
  void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
                       Node* value);
  void VisitReturn(Node* ret);
  void VisitThrow(Node* value);
  void VisitRetain(Node* node);

  void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
                            const CallDescriptor* descriptor, Node* node);

  void EmitIdentity(Node* node);
  bool CanProduceSignalingNaN(Node* node);

  // ===========================================================================

  Schedule* schedule() const { return schedule_; }
  Linkage* linkage() const { return linkage_; }
  InstructionSequence* sequence() const { return sequence_; }
  Zone* instruction_zone() const { return sequence()->zone(); }
  Zone* zone() const { return zone_; }

  void set_instruction_selection_failed() {
    instruction_selection_failed_ = true;
  }
  bool instruction_selection_failed() { return instruction_selection_failed_; }

  void MarkPairProjectionsAsWord32(Node* node);
  bool IsSourcePositionUsed(Node* node);

  // ===========================================================================

  Zone* const zone_;
  Linkage* const linkage_;
  InstructionSequence* const sequence_;
  SourcePositionTable* const source_positions_;
  SourcePositionMode const source_position_mode_;
  Features features_;
  Schedule* const schedule_;
  BasicBlock* current_block_;
  ZoneVector<Instruction*> instructions_;
  BoolVector defined_;
  BoolVector used_;
  IntVector effect_level_;
  IntVector virtual_registers_;
  IntVector virtual_register_rename_;
  InstructionScheduler* scheduler_;
  EnableScheduling enable_scheduling_;
  EnableSerialization enable_serialization_;
  Frame* frame_;
  bool instruction_selection_failed_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_H_