1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
6 #define V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
7 
8 #include <map>
9 
10 #include "src/codegen/cpu-features.h"
11 #include "src/common/globals.h"
12 #include "src/compiler/backend/instruction-scheduler.h"
13 #include "src/compiler/backend/instruction.h"
14 #include "src/compiler/common-operator.h"
15 #include "src/compiler/feedback-source.h"
16 #include "src/compiler/linkage.h"
17 #include "src/compiler/machine-operator.h"
18 #include "src/compiler/node.h"
19 #include "src/zone/zone-containers.h"
20 
21 #if V8_ENABLE_WEBASSEMBLY
22 #include "src/wasm/simd-shuffle.h"
23 #endif  // V8_ENABLE_WEBASSEMBLY
24 
25 namespace v8 {
26 namespace internal {
27 
28 class TickCounter;
29 
30 namespace compiler {
31 
32 // Forward declarations.
33 class BasicBlock;
34 struct CallBuffer;  // TODO(bmeurer): Remove this.
35 class Linkage;
36 class OperandGenerator;
37 class SwitchInfo;
38 class StateObjectDeduplicator;
39 
40 // The flags continuation is a way to combine a branch or a materialization
41 // of a boolean value with an instruction that sets the flags register.
42 // The whole instruction is treated as a unit by the register allocator, and
43 // thus no spills or moves can be introduced between the flags-setting
44 // instruction and the branch or set it should be combined with.
45 class FlagsContinuation final {
46  public:
FlagsContinuation()47   FlagsContinuation() : mode_(kFlags_none) {}
48 
49   // Creates a new flags continuation from the given condition and true/false
50   // blocks.
ForBranch(FlagsCondition condition,BasicBlock * true_block,BasicBlock * false_block)51   static FlagsContinuation ForBranch(FlagsCondition condition,
52                                      BasicBlock* true_block,
53                                      BasicBlock* false_block) {
54     return FlagsContinuation(kFlags_branch, condition, true_block, false_block);
55   }
56 
57   // Creates a new flags continuation for an eager deoptimization exit.
ForDeoptimize(FlagsCondition condition,DeoptimizeReason reason,NodeId node_id,FeedbackSource const & feedback,FrameState frame_state)58   static FlagsContinuation ForDeoptimize(FlagsCondition condition,
59                                          DeoptimizeReason reason,
60                                          NodeId node_id,
61                                          FeedbackSource const& feedback,
62                                          FrameState frame_state) {
63     return FlagsContinuation(kFlags_deoptimize, condition, reason, node_id,
64                              feedback, frame_state);
65   }
ForDeoptimizeForTesting(FlagsCondition condition,DeoptimizeReason reason,NodeId node_id,FeedbackSource const & feedback,Node * frame_state)66   static FlagsContinuation ForDeoptimizeForTesting(
67       FlagsCondition condition, DeoptimizeReason reason, NodeId node_id,
68       FeedbackSource const& feedback, Node* frame_state) {
69     // test-instruction-scheduler.cc passes a dummy Node* as frame_state.
70     // Contents don't matter as long as it's not nullptr.
71     return FlagsContinuation(kFlags_deoptimize, condition, reason, node_id,
72                              feedback, frame_state);
73   }
74 
75   // Creates a new flags continuation for a boolean value.
ForSet(FlagsCondition condition,Node * result)76   static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
77     return FlagsContinuation(condition, result);
78   }
79 
80   // Creates a new flags continuation for a wasm trap.
ForTrap(FlagsCondition condition,TrapId trap_id,Node * result)81   static FlagsContinuation ForTrap(FlagsCondition condition, TrapId trap_id,
82                                    Node* result) {
83     return FlagsContinuation(condition, trap_id, result);
84   }
85 
ForSelect(FlagsCondition condition,Node * result,Node * true_value,Node * false_value)86   static FlagsContinuation ForSelect(FlagsCondition condition, Node* result,
87                                      Node* true_value, Node* false_value) {
88     return FlagsContinuation(condition, result, true_value, false_value);
89   }
90 
IsNone()91   bool IsNone() const { return mode_ == kFlags_none; }
IsBranch()92   bool IsBranch() const { return mode_ == kFlags_branch; }
IsDeoptimize()93   bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
IsSet()94   bool IsSet() const { return mode_ == kFlags_set; }
IsTrap()95   bool IsTrap() const { return mode_ == kFlags_trap; }
IsSelect()96   bool IsSelect() const { return mode_ == kFlags_select; }
condition()97   FlagsCondition condition() const {
98     DCHECK(!IsNone());
99     return condition_;
100   }
reason()101   DeoptimizeReason reason() const {
102     DCHECK(IsDeoptimize());
103     return reason_;
104   }
node_id()105   NodeId node_id() const {
106     DCHECK(IsDeoptimize());
107     return node_id_;
108   }
feedback()109   FeedbackSource const& feedback() const {
110     DCHECK(IsDeoptimize());
111     return feedback_;
112   }
frame_state()113   Node* frame_state() const {
114     DCHECK(IsDeoptimize());
115     return frame_state_or_result_;
116   }
result()117   Node* result() const {
118     DCHECK(IsSet() || IsSelect());
119     return frame_state_or_result_;
120   }
trap_id()121   TrapId trap_id() const {
122     DCHECK(IsTrap());
123     return trap_id_;
124   }
true_block()125   BasicBlock* true_block() const {
126     DCHECK(IsBranch());
127     return true_block_;
128   }
false_block()129   BasicBlock* false_block() const {
130     DCHECK(IsBranch());
131     return false_block_;
132   }
true_value()133   Node* true_value() const {
134     DCHECK(IsSelect());
135     return true_value_;
136   }
false_value()137   Node* false_value() const {
138     DCHECK(IsSelect());
139     return false_value_;
140   }
141 
Negate()142   void Negate() {
143     DCHECK(!IsNone());
144     condition_ = NegateFlagsCondition(condition_);
145   }
146 
Commute()147   void Commute() {
148     DCHECK(!IsNone());
149     condition_ = CommuteFlagsCondition(condition_);
150   }
151 
Overwrite(FlagsCondition condition)152   void Overwrite(FlagsCondition condition) { condition_ = condition; }
153 
OverwriteAndNegateIfEqual(FlagsCondition condition)154   void OverwriteAndNegateIfEqual(FlagsCondition condition) {
155     DCHECK(condition_ == kEqual || condition_ == kNotEqual);
156     bool negate = condition_ == kEqual;
157     condition_ = condition;
158     if (negate) Negate();
159   }
160 
OverwriteUnsignedIfSigned()161   void OverwriteUnsignedIfSigned() {
162     switch (condition_) {
163       case kSignedLessThan:
164         condition_ = kUnsignedLessThan;
165         break;
166       case kSignedLessThanOrEqual:
167         condition_ = kUnsignedLessThanOrEqual;
168         break;
169       case kSignedGreaterThan:
170         condition_ = kUnsignedGreaterThan;
171         break;
172       case kSignedGreaterThanOrEqual:
173         condition_ = kUnsignedGreaterThanOrEqual;
174         break;
175       default:
176         break;
177     }
178   }
179 
180   // Encodes this flags continuation into the given opcode.
Encode(InstructionCode opcode)181   InstructionCode Encode(InstructionCode opcode) {
182     opcode |= FlagsModeField::encode(mode_);
183     if (mode_ != kFlags_none) {
184       opcode |= FlagsConditionField::encode(condition_);
185     }
186     return opcode;
187   }
188 
189  private:
FlagsContinuation(FlagsMode mode,FlagsCondition condition,BasicBlock * true_block,BasicBlock * false_block)190   FlagsContinuation(FlagsMode mode, FlagsCondition condition,
191                     BasicBlock* true_block, BasicBlock* false_block)
192       : mode_(mode),
193         condition_(condition),
194         true_block_(true_block),
195         false_block_(false_block) {
196     DCHECK(mode == kFlags_branch);
197     DCHECK_NOT_NULL(true_block);
198     DCHECK_NOT_NULL(false_block);
199   }
200 
FlagsContinuation(FlagsMode mode,FlagsCondition condition,DeoptimizeReason reason,NodeId node_id,FeedbackSource const & feedback,Node * frame_state)201   FlagsContinuation(FlagsMode mode, FlagsCondition condition,
202                     DeoptimizeReason reason, NodeId node_id,
203                     FeedbackSource const& feedback, Node* frame_state)
204       : mode_(mode),
205         condition_(condition),
206         reason_(reason),
207         node_id_(node_id),
208         feedback_(feedback),
209         frame_state_or_result_(frame_state) {
210     DCHECK(mode == kFlags_deoptimize);
211     DCHECK_NOT_NULL(frame_state);
212   }
213 
FlagsContinuation(FlagsCondition condition,Node * result)214   FlagsContinuation(FlagsCondition condition, Node* result)
215       : mode_(kFlags_set),
216         condition_(condition),
217         frame_state_or_result_(result) {
218     DCHECK_NOT_NULL(result);
219   }
220 
FlagsContinuation(FlagsCondition condition,TrapId trap_id,Node * result)221   FlagsContinuation(FlagsCondition condition, TrapId trap_id, Node* result)
222       : mode_(kFlags_trap),
223         condition_(condition),
224         frame_state_or_result_(result),
225         trap_id_(trap_id) {
226     DCHECK_NOT_NULL(result);
227   }
228 
FlagsContinuation(FlagsCondition condition,Node * result,Node * true_value,Node * false_value)229   FlagsContinuation(FlagsCondition condition, Node* result, Node* true_value,
230                     Node* false_value)
231       : mode_(kFlags_select),
232         condition_(condition),
233         frame_state_or_result_(result),
234         true_value_(true_value),
235         false_value_(false_value) {
236     DCHECK_NOT_NULL(result);
237     DCHECK_NOT_NULL(true_value);
238     DCHECK_NOT_NULL(false_value);
239   }
240 
241   FlagsMode const mode_;
242   FlagsCondition condition_;
243   DeoptimizeReason reason_;         // Only valid if mode_ == kFlags_deoptimize*
244   NodeId node_id_;                  // Only valid if mode_ == kFlags_deoptimize*
245   FeedbackSource feedback_;         // Only valid if mode_ == kFlags_deoptimize*
246   Node* frame_state_or_result_;     // Only valid if mode_ == kFlags_deoptimize*
247                                     // or mode_ == kFlags_set.
248   BasicBlock* true_block_;          // Only valid if mode_ == kFlags_branch*.
249   BasicBlock* false_block_;         // Only valid if mode_ == kFlags_branch*.
250   TrapId trap_id_;                  // Only valid if mode_ == kFlags_trap.
251   Node* true_value_;                // Only valid if mode_ == kFlags_select.
252   Node* false_value_;               // Only valid if mode_ == kFlags_select.
253 };
254 
255 // This struct connects nodes of parameters which are going to be pushed on the
256 // call stack with their parameter index in the call descriptor of the callee.
257 struct PushParameter {
258   PushParameter(Node* n = nullptr,
259                 LinkageLocation l = LinkageLocation::ForAnyRegister())
nodePushParameter260       : node(n), location(l) {}
261 
262   Node* node;
263   LinkageLocation location;
264 };
265 
// How a frame-state input may be materialized for deoptimization: as any
// operand kind, or pinned to a stack slot.
enum class FrameStateInputKind { kAny, kStackSlot };
267 
268 // Instruction selection generates an InstructionSequence for a given Schedule.
269 class V8_EXPORT_PRIVATE InstructionSelector final {
270  public:
271   // Forward declarations.
272   class Features;
273 
274   enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
275   enum EnableScheduling { kDisableScheduling, kEnableScheduling };
276   enum EnableRootsRelativeAddressing {
277     kDisableRootsRelativeAddressing,
278     kEnableRootsRelativeAddressing
279   };
280   enum EnableSwitchJumpTable {
281     kDisableSwitchJumpTable,
282     kEnableSwitchJumpTable
283   };
284   enum EnableTraceTurboJson { kDisableTraceTurboJson, kEnableTraceTurboJson };
285 
286   InstructionSelector(
287       Zone* zone, size_t node_count, Linkage* linkage,
288       InstructionSequence* sequence, Schedule* schedule,
289       SourcePositionTable* source_positions, Frame* frame,
290       EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
291       JSHeapBroker* broker, size_t* max_unoptimized_frame_height,
292       size_t* max_pushed_argument_count,
293       SourcePositionMode source_position_mode = kCallSourcePositions,
294       Features features = SupportedFeatures(),
295       EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
296                                                ? kEnableScheduling
297                                                : kDisableScheduling,
298       EnableRootsRelativeAddressing enable_roots_relative_addressing =
299           kDisableRootsRelativeAddressing,
300       EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);
301 
302   // Visit code for the entire graph with the included schedule.
303   bool SelectInstructions();
304 
305   void StartBlock(RpoNumber rpo);
306   void EndBlock(RpoNumber rpo);
307   void AddInstruction(Instruction* instr);
308   void AddTerminator(Instruction* instr);
309 
310   // ===========================================================================
311   // ============= Architecture-independent code emission methods. =============
312   // ===========================================================================
313 
314   Instruction* Emit(InstructionCode opcode, InstructionOperand output,
315                     size_t temp_count = 0, InstructionOperand* temps = nullptr);
316   Instruction* Emit(InstructionCode opcode, InstructionOperand output,
317                     InstructionOperand a, size_t temp_count = 0,
318                     InstructionOperand* temps = nullptr);
319   Instruction* Emit(InstructionCode opcode, InstructionOperand output,
320                     InstructionOperand a, InstructionOperand b,
321                     size_t temp_count = 0, InstructionOperand* temps = nullptr);
322   Instruction* Emit(InstructionCode opcode, InstructionOperand output,
323                     InstructionOperand a, InstructionOperand b,
324                     InstructionOperand c, size_t temp_count = 0,
325                     InstructionOperand* temps = nullptr);
326   Instruction* Emit(InstructionCode opcode, InstructionOperand output,
327                     InstructionOperand a, InstructionOperand b,
328                     InstructionOperand c, InstructionOperand d,
329                     size_t temp_count = 0, InstructionOperand* temps = nullptr);
330   Instruction* Emit(InstructionCode opcode, InstructionOperand output,
331                     InstructionOperand a, InstructionOperand b,
332                     InstructionOperand c, InstructionOperand d,
333                     InstructionOperand e, size_t temp_count = 0,
334                     InstructionOperand* temps = nullptr);
335   Instruction* Emit(InstructionCode opcode, InstructionOperand output,
336                     InstructionOperand a, InstructionOperand b,
337                     InstructionOperand c, InstructionOperand d,
338                     InstructionOperand e, InstructionOperand f,
339                     size_t temp_count = 0, InstructionOperand* temps = nullptr);
340   Instruction* Emit(InstructionCode opcode, size_t output_count,
341                     InstructionOperand* outputs, size_t input_count,
342                     InstructionOperand* inputs, size_t temp_count = 0,
343                     InstructionOperand* temps = nullptr);
344   Instruction* Emit(Instruction* instr);
345 
346   // [0-3] operand instructions with no output, uses labels for true and false
347   // blocks of the continuation.
348   Instruction* EmitWithContinuation(InstructionCode opcode,
349                                     FlagsContinuation* cont);
350   Instruction* EmitWithContinuation(InstructionCode opcode,
351                                     InstructionOperand a,
352                                     FlagsContinuation* cont);
353   Instruction* EmitWithContinuation(InstructionCode opcode,
354                                     InstructionOperand a, InstructionOperand b,
355                                     FlagsContinuation* cont);
356   Instruction* EmitWithContinuation(InstructionCode opcode,
357                                     InstructionOperand a, InstructionOperand b,
358                                     InstructionOperand c,
359                                     FlagsContinuation* cont);
360   Instruction* EmitWithContinuation(InstructionCode opcode, size_t output_count,
361                                     InstructionOperand* outputs,
362                                     size_t input_count,
363                                     InstructionOperand* inputs,
364                                     FlagsContinuation* cont);
365   Instruction* EmitWithContinuation(
366       InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
367       size_t input_count, InstructionOperand* inputs, size_t temp_count,
368       InstructionOperand* temps, FlagsContinuation* cont);
369 
370   void EmitIdentity(Node* node);
371 
372   // ===========================================================================
373   // ============== Architecture-independent CPU feature methods. ==============
374   // ===========================================================================
375 
376   class Features final {
377    public:
Features()378     Features() : bits_(0) {}
Features(unsigned bits)379     explicit Features(unsigned bits) : bits_(bits) {}
Features(CpuFeature f)380     explicit Features(CpuFeature f) : bits_(1u << f) {}
Features(CpuFeature f1,CpuFeature f2)381     Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}
382 
Contains(CpuFeature f)383     bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }
384 
385    private:
386     unsigned bits_;
387   };
388 
IsSupported(CpuFeature feature)389   bool IsSupported(CpuFeature feature) const {
390     return features_.Contains(feature);
391   }
392 
393   // Returns the features supported on the target platform.
SupportedFeatures()394   static Features SupportedFeatures() {
395     return Features(CpuFeatures::SupportedFeatures());
396   }
397 
398   // TODO(sigurds) This should take a CpuFeatures argument.
399   static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();
400 
401   static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
402 
403   // ===========================================================================
404   // ============ Architecture-independent graph covering methods. =============
405   // ===========================================================================
406 
407   // Used in pattern matching during code generation.
408   // Check if {node} can be covered while generating code for the current
409   // instruction. A node can be covered if the {user} of the node has the only
410   // edge, the two are in the same basic block, and there are no side-effects
411   // in-between. The last check is crucial for soundness.
412   // For pure nodes, CanCover(a,b) is checked to avoid duplicated execution:
413   // If this is not the case, code for b must still be generated for other
414   // users, and fusing is unlikely to improve performance.
415   bool CanCover(Node* user, Node* node) const;
416 
417   // Used in pattern matching during code generation.
418   // This function checks that {node} and {user} are in the same basic block,
419   // and that {user} is the only user of {node} in this basic block.  This
420   // check guarantees that there are no users of {node} scheduled between
421   // {node} and {user}, and thus we can select a single instruction for both
422   // nodes, if such an instruction exists. This check can be used for example
423   // when selecting instructions for:
424   //   n = Int32Add(a, b)
425   //   c = Word32Compare(n, 0, cond)
426   //   Branch(c, true_label, false_label)
427   // Here we can generate a flag-setting add instruction, even if the add has
428   // uses in other basic blocks, since the flag-setting add instruction will
429   // still generate the result of the addition and not just set the flags.
430   // However, if we had uses of the add in the same basic block, we could have:
431   //   n = Int32Add(a, b)
432   //   o = OtherOp(n, ...)
433   //   c = Word32Compare(n, 0, cond)
434   //   Branch(c, true_label, false_label)
435   // where we cannot select the add and the compare together.  If we were to
436   // select a flag-setting add instruction for Word32Compare and Int32Add while
437   // visiting Word32Compare, we would then have to select an instruction for
438   // OtherOp *afterwards*, which means we would attempt to use the result of
439   // the add before we have defined it.
440   bool IsOnlyUserOfNodeInSameBlock(Node* user, Node* node) const;
441 
442   // Checks if {node} was already defined, and therefore code was already
443   // generated for it.
444   bool IsDefined(Node* node) const;
445 
446   // Checks if {node} has any uses, and therefore code has to be generated for
447   // it.
448   bool IsUsed(Node* node) const;
449 
450   // Checks if {node} is currently live.
IsLive(Node * node)451   bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }
452 
453   // Gets the effect level of {node}.
454   int GetEffectLevel(Node* node) const;
455 
456   // Gets the effect level of {node}, appropriately adjusted based on
457   // continuation flags if the node is a branch.
458   int GetEffectLevel(Node* node, FlagsContinuation* cont) const;
459 
460   int GetVirtualRegister(const Node* node);
461   const std::map<NodeId, int> GetVirtualRegistersForTesting() const;
462 
463   // Check if we can generate loads and stores of ExternalConstants relative
464   // to the roots register.
465   bool CanAddressRelativeToRootsRegister(
466       const ExternalReference& reference) const;
467   // Check if we can use the roots register to access GC roots.
468   bool CanUseRootsRegister() const;
469 
isolate()470   Isolate* isolate() const { return sequence()->isolate(); }
471 
instr_origins()472   const ZoneVector<std::pair<int, int>>& instr_origins() const {
473     return instr_origins_;
474   }
475 
476  private:
477   friend class OperandGenerator;
478 
UseInstructionScheduling()479   bool UseInstructionScheduling() const {
480     return (enable_scheduling_ == kEnableScheduling) &&
481            InstructionScheduler::SchedulerSupported();
482   }
483 
484   void AppendDeoptimizeArguments(InstructionOperandVector* args,
485                                  DeoptimizeReason reason, NodeId node_id,
486                                  FeedbackSource const& feedback,
487                                  FrameState frame_state);
488 
489   void EmitTableSwitch(const SwitchInfo& sw,
490                        InstructionOperand const& index_operand);
491   void EmitBinarySearchSwitch(const SwitchInfo& sw,
492                               InstructionOperand const& value_operand);
493 
494   void TryRename(InstructionOperand* op);
495   int GetRename(int virtual_register);
496   void SetRename(const Node* node, const Node* rename);
497   void UpdateRenames(Instruction* instruction);
498   void UpdateRenamesInPhi(PhiInstruction* phi);
499 
500   // Inform the instruction selection that {node} was just defined.
501   void MarkAsDefined(Node* node);
502 
503   // Inform the instruction selection that {node} has at least one use and we
504   // will need to generate code for it.
505   void MarkAsUsed(Node* node);
506 
507   // Sets the effect level of {node}.
508   void SetEffectLevel(Node* node, int effect_level);
509 
510   // Inform the register allocation of the representation of the value produced
511   // by {node}.
512   void MarkAsRepresentation(MachineRepresentation rep, Node* node);
MarkAsWord32(Node * node)513   void MarkAsWord32(Node* node) {
514     MarkAsRepresentation(MachineRepresentation::kWord32, node);
515   }
MarkAsWord64(Node * node)516   void MarkAsWord64(Node* node) {
517     MarkAsRepresentation(MachineRepresentation::kWord64, node);
518   }
MarkAsFloat32(Node * node)519   void MarkAsFloat32(Node* node) {
520     MarkAsRepresentation(MachineRepresentation::kFloat32, node);
521   }
MarkAsFloat64(Node * node)522   void MarkAsFloat64(Node* node) {
523     MarkAsRepresentation(MachineRepresentation::kFloat64, node);
524   }
MarkAsSimd128(Node * node)525   void MarkAsSimd128(Node* node) {
526     MarkAsRepresentation(MachineRepresentation::kSimd128, node);
527   }
MarkAsTagged(Node * node)528   void MarkAsTagged(Node* node) {
529     MarkAsRepresentation(MachineRepresentation::kTagged, node);
530   }
MarkAsCompressed(Node * node)531   void MarkAsCompressed(Node* node) {
532     MarkAsRepresentation(MachineRepresentation::kCompressed, node);
533   }
534 
535   // Inform the register allocation of the representation of the unallocated
536   // operand {op}.
537   void MarkAsRepresentation(MachineRepresentation rep,
538                             const InstructionOperand& op);
539 
540   enum CallBufferFlag {
541     kCallCodeImmediate = 1u << 0,
542     kCallAddressImmediate = 1u << 1,
543     kCallTail = 1u << 2,
544     kCallFixedTargetRegister = 1u << 3
545   };
546   using CallBufferFlags = base::Flags<CallBufferFlag>;
547 
548   // Initialize the call buffer with the InstructionOperands, nodes, etc,
549   // corresponding
550   // to the inputs and outputs of the call.
551   // {call_code_immediate} to generate immediate operands to calls of code.
552   // {call_address_immediate} to generate immediate operands to address calls.
553   void InitializeCallBuffer(Node* call, CallBuffer* buffer,
554                             CallBufferFlags flags, int stack_slot_delta = 0);
555   bool IsTailCallAddressImmediate();
556 
557   void UpdateMaxPushedArgumentCount(size_t count);
558 
559   FrameStateDescriptor* GetFrameStateDescriptor(FrameState node);
560   size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
561                                          FrameState state, OperandGenerator* g,
562                                          StateObjectDeduplicator* deduplicator,
563                                          InstructionOperandVector* inputs,
564                                          FrameStateInputKind kind, Zone* zone);
565   size_t AddInputsToFrameStateDescriptor(StateValueList* values,
566                                          InstructionOperandVector* inputs,
567                                          OperandGenerator* g,
568                                          StateObjectDeduplicator* deduplicator,
569                                          Node* node, FrameStateInputKind kind,
570                                          Zone* zone);
571   size_t AddOperandToStateValueDescriptor(StateValueList* values,
572                                           InstructionOperandVector* inputs,
573                                           OperandGenerator* g,
574                                           StateObjectDeduplicator* deduplicator,
575                                           Node* input, MachineType type,
576                                           FrameStateInputKind kind, Zone* zone);
577 
578   // ===========================================================================
579   // ============= Architecture-specific graph covering methods. ===============
580   // ===========================================================================
581 
582   // Visit nodes in the given block and generate code.
583   void VisitBlock(BasicBlock* block);
584 
585   // Visit the node for the control flow at the end of the block, generating
586   // code if necessary.
587   void VisitControl(BasicBlock* block);
588 
589   // Visit the node and generate code, if any.
590   void VisitNode(Node* node);
591 
592   // Visit the node and generate code for IEEE 754 functions.
593   void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
594   void VisitFloat64Ieee754Unop(Node*, InstructionCode code);
595 
596 #define DECLARE_GENERATOR(x) void Visit##x(Node* node);
597   MACHINE_OP_LIST(DECLARE_GENERATOR)
598   MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
599 #undef DECLARE_GENERATOR
600 
601   // Visit the load node with a value and opcode to replace with.
602   void VisitLoad(Node* node, Node* value, InstructionCode opcode);
603   void VisitLoadTransform(Node* node, Node* value, InstructionCode opcode);
604   void VisitFinishRegion(Node* node);
605   void VisitParameter(Node* node);
606   void VisitIfException(Node* node);
607   void VisitOsrValue(Node* node);
608   void VisitPhi(Node* node);
609   void VisitProjection(Node* node);
610   void VisitConstant(Node* node);
611   void VisitCall(Node* call, BasicBlock* handler = nullptr);
612   void VisitDeoptimizeIf(Node* node);
613   void VisitDeoptimizeUnless(Node* node);
614   void VisitDynamicCheckMapsWithDeoptUnless(Node* node);
615   void VisitTrapIf(Node* node, TrapId trap_id);
616   void VisitTrapUnless(Node* node, TrapId trap_id);
617   void VisitTailCall(Node* call);
618   void VisitGoto(BasicBlock* target);
619   void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
620   void VisitSwitch(Node* node, const SwitchInfo& sw);
621   void VisitDeoptimize(DeoptimizeReason reason, NodeId node_id,
622                        FeedbackSource const& feedback, FrameState frame_state);
623   void VisitSelect(Node* node);
624   void VisitReturn(Node* ret);
625   void VisitThrow(Node* node);
626   void VisitRetain(Node* node);
627   void VisitUnreachable(Node* node);
628   void VisitStaticAssert(Node* node);
629   void VisitDeadValue(Node* node);
630 
631   void VisitStackPointerGreaterThan(Node* node, FlagsContinuation* cont);
632 
633   void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);
634 
635   void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
636                             const CallDescriptor* call_descriptor, Node* node);
637   void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
638                           const CallDescriptor* call_descriptor, Node* node);
639 
640   bool CanProduceSignalingNaN(Node* node);
641 
642   void AddOutputToSelectContinuation(OperandGenerator* g, int first_input_index,
643                                      Node* node);
644 
645   // ===========================================================================
646   // ============= Vector instruction (SIMD) helper fns. =======================
647   // ===========================================================================
648 
649 #if V8_ENABLE_WEBASSEMBLY
650   // Canonicalize shuffles to make pattern matching simpler. Returns the shuffle
651   // indices, and a boolean indicating if the shuffle is a swizzle (one input).
652   void CanonicalizeShuffle(Node* node, uint8_t* shuffle, bool* is_swizzle);
653 
654   // Swaps the two first input operands of the node, to help match shuffles
655   // to specific architectural instructions.
656   void SwapShuffleInputs(Node* node);
657 #endif  // V8_ENABLE_WEBASSEMBLY
658 
659   // ===========================================================================
660 
schedule()661   Schedule* schedule() const { return schedule_; }
linkage()662   Linkage* linkage() const { return linkage_; }
sequence()663   InstructionSequence* sequence() const { return sequence_; }
instruction_zone()664   Zone* instruction_zone() const { return sequence()->zone(); }
zone()665   Zone* zone() const { return zone_; }
666 
set_instruction_selection_failed()667   void set_instruction_selection_failed() {
668     instruction_selection_failed_ = true;
669   }
instruction_selection_failed()670   bool instruction_selection_failed() { return instruction_selection_failed_; }
671 
  void MarkPairProjectionsAsWord32(Node* node);
  bool IsSourcePositionUsed(Node* node);
  // Shared dispatch for 32-bit atomic read-modify-write operations; the caller
  // supplies one ArchOpcode per representation width/signedness.
  void VisitWord32AtomicBinaryOperation(Node* node, ArchOpcode int8_op,
                                        ArchOpcode uint8_op,
                                        ArchOpcode int16_op,
                                        ArchOpcode uint16_op,
                                        ArchOpcode word32_op);
  // Same as above for 64-bit atomics (unsigned widths only).
  void VisitWord64AtomicBinaryOperation(Node* node, ArchOpcode uint8_op,
                                        ArchOpcode uint16_op,
                                        ArchOpcode uint32_op,
                                        ArchOpcode uint64_op);
  // 64-bit atomic binop whose result is narrower than 64 bits.
  void VisitWord64AtomicNarrowBinop(Node* node, ArchOpcode uint8_op,
                                    ArchOpcode uint16_op, ArchOpcode uint32_op);

#if V8_TARGET_ARCH_64_BIT
  bool ZeroExtendsWord32ToWord64(Node* node, int recursion_depth = 0);
  bool ZeroExtendsWord32ToWord64NoPhis(Node* node);

  // Tri-state cache entry recording whether a node's upper 32 bits are known
  // to be zero (stored per node ID in phi_states_ below).
  // NOTE(review): presumably populated by ZeroExtendsWord32ToWord64 — the
  // definitions are not visible in this header; confirm in the .cc file.
  enum Upper32BitsState : uint8_t {
    kNotYetChecked,
    kUpperBitsGuaranteedZero,
    kNoGuarantee,
  };
#endif  // V8_TARGET_ARCH_64_BIT
696 
697   struct FrameStateInput {
FrameStateInputFrameStateInput698     FrameStateInput(Node* node_, FrameStateInputKind kind_)
699         : node(node_), kind(kind_) {}
700 
701     Node* node;
702     FrameStateInputKind kind;
703 
704     struct Hash {
operatorFrameStateInput::Hash705       size_t operator()(FrameStateInput const& source) const {
706         return base::hash_combine(source.node,
707                                   static_cast<size_t>(source.kind));
708       }
709     };
710 
711     struct Equal {
operatorFrameStateInput::Equal712       bool operator()(FrameStateInput const& lhs,
713                       FrameStateInput const& rhs) const {
714         return lhs.node == rhs.node && lhs.kind == rhs.kind;
715       }
716     };
717   };
718 
  struct CachedStateValues;
  class CachedStateValuesBuilder;

  // ===========================================================================

  // Inputs fixed at construction time.
  Zone* const zone_;
  Linkage* const linkage_;
  InstructionSequence* const sequence_;
  SourcePositionTable* const source_positions_;
  SourcePositionMode const source_position_mode_;
  Features features_;
  Schedule* const schedule_;

  // Mutable per-block / per-node selection state.
  BasicBlock* current_block_;
  ZoneVector<Instruction*> instructions_;
  InstructionOperandVector continuation_inputs_;
  InstructionOperandVector continuation_outputs_;
  InstructionOperandVector continuation_temps_;
  BoolVector defined_;
  BoolVector used_;
  IntVector effect_level_;
  int current_effect_level_;
  IntVector virtual_registers_;
  IntVector virtual_register_rename_;

  // Optional features, toggled by the Enable* constructor flags.
  InstructionScheduler* scheduler_;
  EnableScheduling enable_scheduling_;
  EnableRootsRelativeAddressing enable_roots_relative_addressing_;
  EnableSwitchJumpTable enable_switch_jump_table_;
  // Memoizes translated frame-state values, keyed by (node, input kind).
  ZoneUnorderedMap<FrameStateInput, CachedStateValues*, FrameStateInput::Hash,
                   FrameStateInput::Equal>
      state_values_cache_;

  Frame* frame_;
  bool instruction_selection_failed_;
  ZoneVector<std::pair<int, int>> instr_origins_;
  EnableTraceTurboJson trace_turbo_;
  TickCounter* const tick_counter_;
  // The broker is only used for unparking the LocalHeap for diagnostic printing
  // for failed StaticAsserts.
  JSHeapBroker* const broker_;

  // Store the maximal unoptimized frame height and a maximal number of pushed
  // arguments (for calls). Later used to apply an offset to stack checks.
  size_t* max_unoptimized_frame_height_;
  size_t* max_pushed_argument_count_;

#if V8_TARGET_ARCH_64_BIT
  // Holds lazily-computed results for whether phi nodes guarantee their upper
  // 32 bits to be zero. Indexed by node ID; nobody reads or writes the values
  // for non-phi nodes.
  ZoneVector<Upper32BitsState> phi_states_;
#endif
770 };
771 
772 }  // namespace compiler
773 }  // namespace internal
774 }  // namespace v8
775 
776 #endif  // V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
777