// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_INSTRUCTION_SELECTOR_H_

#include <deque>

#include "src/compiler/common-operator.h"
#include "src/compiler/instruction.h"
#include "src/compiler/machine-operator.h"
#include "src/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// Forward declarations.
struct CallBuffer;  // TODO(bmeurer): Remove this.
class FlagsContinuation;

class InstructionSelector FINAL {
 public:
  // Forward declarations.
  class Features;

  InstructionSelector(InstructionSequence* sequence,
                      SourcePositionTable* source_positions,
                      Features features = SupportedFeatures());

  // Visit code for the entire graph with the included schedule.
  void SelectInstructions();

  // ===========================================================================
  // ============= Architecture-independent code emission methods. =============
  // ===========================================================================

  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    size_t temp_count = 0, InstructionOperand** temps = NULL);
  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    InstructionOperand* a, size_t temp_count = 0,
                    InstructionOperand** temps = NULL);
  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    InstructionOperand* a, InstructionOperand* b,
                    size_t temp_count = 0, InstructionOperand** temps = NULL);
  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    InstructionOperand* a, InstructionOperand* b,
                    InstructionOperand* c, size_t temp_count = 0,
                    InstructionOperand** temps = NULL);
  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    InstructionOperand* a, InstructionOperand* b,
                    InstructionOperand* c, InstructionOperand* d,
                    size_t temp_count = 0, InstructionOperand** temps = NULL);
  Instruction* Emit(InstructionCode opcode, size_t output_count,
                    InstructionOperand** outputs, size_t input_count,
                    InstructionOperand** inputs, size_t temp_count = 0,
                    InstructionOperand** temps = NULL);
  Instruction* Emit(Instruction* instr);
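
  // A typical call site, in an architecture-specific selector, might look
  // roughly like the following sketch (the opcode, operand generator and
  // chosen operands are illustrative, not prescribed by this header):
  //
  //   X64OperandGenerator g(this);
  //   Emit(kX64Add32, g.DefineSameAsFirst(node),
  //        g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));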

  // ===========================================================================
  // ============== Architecture-independent CPU feature methods. ==============
  // ===========================================================================

  class Features FINAL {
   public:
    Features() : bits_(0) {}
    explicit Features(unsigned bits) : bits_(bits) {}
    explicit Features(CpuFeature f) : bits_(1u << f) {}
    Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}

    bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }

   private:
    unsigned bits_;
  };

  bool IsSupported(CpuFeature feature) const {
    return features_.Contains(feature);
  }

  // Returns the features supported on the target platform.
  static Features SupportedFeatures() {
    return Features(CpuFeatures::SupportedFeatures());
  }
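
  // Illustrative use in an architecture-specific visitor (SSE4_1 is just one
  // CpuFeature value, not something this header mandates):
  //
  //   if (IsSupported(SSE4_1)) {
  //     // Emit an SSE4.1-only instruction, e.g. roundsd.
  //   } else {
  //     // Fall back to a generic instruction sequence.
  //   }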

 private:
  friend class OperandGenerator;

  // ===========================================================================
  // ============ Architecture-independent graph covering methods. =============
  // ===========================================================================

  // Checks if {block} will appear directly after {current_block_} when
  // assembling code, in which case a fall-through can be used.
  bool IsNextInAssemblyOrder(const BasicBlock* block) const;

  // Used in pattern matching during code generation.
  // Checks if {node} can be covered while generating code for the current
  // instruction. A node can be covered if the {user} of the node holds its
  // only edge and the two are in the same basic block.
  bool CanCover(Node* user, Node* node) const;
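
  // For instance (illustrative, not tied to any particular architecture): a
  // load whose sole use is a compare in the same basic block can be covered
  // by the compare's visitor and folded into a single memory-operand
  // instruction.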

  // Checks if {node} was already defined, and therefore code was already
  // generated for it.
  bool IsDefined(Node* node) const;

  // Inform the instruction selector that {node} was just defined.
  void MarkAsDefined(Node* node);

  // Checks if {node} has any uses, and therefore code has to be generated for
  // it.
  bool IsUsed(Node* node) const;

  // Inform the instruction selector that {node} has at least one use and we
  // will need to generate code for it.
  void MarkAsUsed(Node* node);

  // Checks if {node} is marked as double.
  bool IsDouble(const Node* node) const;

  // Inform the register allocator of a double result.
  void MarkAsDouble(Node* node);

  // Checks if {node} is marked as reference.
  bool IsReference(const Node* node) const;

  // Inform the register allocator of a reference result.
  void MarkAsReference(Node* node);

  // Inform the register allocator of the representation of the value produced
  // by {node}.
  void MarkAsRepresentation(MachineType rep, Node* node);

  // Initialize the call buffer with the InstructionOperands, nodes, etc.
  // corresponding to the inputs and outputs of the call.
  // {call_code_immediate} requests immediate operands for calls to code;
  // {call_address_immediate} requests immediate operands for calls to
  // addresses.
  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                            bool call_code_immediate,
                            bool call_address_immediate);

  FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
  void AddFrameStateInputs(Node* state, InstructionOperandVector* inputs,
                           FrameStateDescriptor* descriptor);

  // ===========================================================================
  // ============= Architecture-specific graph covering methods. ===============
  // ===========================================================================

  // Visit nodes in the given block and generate code.
  void VisitBlock(BasicBlock* block);

  // Visit the node for the control flow at the end of the block, generating
  // code if necessary.
  void VisitControl(BasicBlock* block);

  // Visit the node and generate code, if any.
  void VisitNode(Node* node);

#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
  MACHINE_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
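
  // The macro above declares one visitor per machine operator, e.g.
  // (illustrative subset of the expansion):
  //
  //   void VisitInt32Add(Node* node);
  //   void VisitWord32And(Node* node);
  //   void VisitFloat64Mul(Node* node);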

  void VisitInt32AddWithOverflow(Node* node, FlagsContinuation* cont);
  void VisitInt32SubWithOverflow(Node* node, FlagsContinuation* cont);

  void VisitWord32Test(Node* node, FlagsContinuation* cont);
  void VisitWord64Test(Node* node, FlagsContinuation* cont);
  void VisitWord32Compare(Node* node, FlagsContinuation* cont);
  void VisitWord64Compare(Node* node, FlagsContinuation* cont);
  void VisitFloat64Compare(Node* node, FlagsContinuation* cont);

  void VisitFinish(Node* node);
  void VisitParameter(Node* node);
  void VisitPhi(Node* node);
  void VisitProjection(Node* node);
  void VisitConstant(Node* node);
  void VisitCall(Node* call, BasicBlock* continuation,
                 BasicBlock* deoptimization);
  void VisitGoto(BasicBlock* target);
  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
  void VisitReturn(Node* value);
  void VisitThrow(Node* value);
  void VisitDeoptimize(Node* deopt);

  // ===========================================================================

  Graph* graph() const { return sequence()->graph(); }
  Linkage* linkage() const { return sequence()->linkage(); }
  Schedule* schedule() const { return sequence()->schedule(); }
  InstructionSequence* sequence() const { return sequence_; }
  Zone* instruction_zone() const { return sequence()->zone(); }
  Zone* zone() { return &zone_; }

  // ===========================================================================

  Zone zone_;
  InstructionSequence* sequence_;
  SourcePositionTable* source_positions_;
  Features features_;
  BasicBlock* current_block_;
  ZoneDeque<Instruction*> instructions_;
  BoolVector defined_;
  BoolVector used_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_H_