// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_

#include <map>

#include "src/codegen/cpu-features.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction-scheduler.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/wasm/simd-shuffle.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {

class TickCounter;

namespace compiler {

// Forward declarations.
class BasicBlock;
struct CallBuffer;  // TODO(bmeurer): Remove this.
class Linkage;
class OperandGenerator;
class SwitchInfo;
class StateObjectDeduplicator;

// The flags continuation is a way to combine a branch or a materialization
// of a boolean value with an instruction that sets the flags register.
// The whole instruction is treated as a unit by the register allocator, and
// thus no spills or moves can be introduced between the flags-setting
// instruction and the branch or set it should be combined with.
class FlagsContinuation final {
 public:
  FlagsContinuation() : mode_(kFlags_none) {}

  // Creates a new flags continuation from the given condition and true/false
  // blocks.
  static FlagsContinuation ForBranch(FlagsCondition condition,
                                     BasicBlock* true_block,
                                     BasicBlock* false_block) {
    return FlagsContinuation(kFlags_branch, condition, true_block,
                             false_block);
  }

  static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
                                              BasicBlock* true_block,
                                              BasicBlock* false_block) {
    return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
                             false_block);
  }

  // Creates a new flags continuation for an eager deoptimization exit.
  static FlagsContinuation ForDeoptimize(FlagsCondition condition,
                                         DeoptimizeKind kind,
                                         DeoptimizeReason reason,
                                         FeedbackSource const& feedback,
                                         Node* frame_state) {
    return FlagsContinuation(kFlags_deoptimize, condition, kind, reason,
                             feedback, frame_state);
  }

  // Creates a new flags continuation for an eager deoptimization exit.
  static FlagsContinuation ForDeoptimizeAndPoison(
      FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
      FeedbackSource const& feedback, Node* frame_state) {
    return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind,
                             reason, feedback, frame_state);
  }
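
  // Usage sketch (illustrative, not part of this interface): a backend that
  // wants to fold a comparison into a branch builds a continuation and emits
  // the flags-setting instruction through it, e.g.
  //
  //   FlagsContinuation cont =
  //       FlagsContinuation::ForBranch(kEqual, true_block, false_block);
  //   selector->EmitWithContinuation(kSomeArchCompare, left, right, &cont);
  //
  // where kSomeArchCompare stands in for an architecture-specific
  // flags-setting opcode. EmitWithContinuation (declared below on
  // InstructionSelector) uses Encode() to fold the mode and condition into
  // the opcode.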

  // Creates a new flags continuation for a boolean value.
  static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
    return FlagsContinuation(condition, result);
  }

  // Creates a new flags continuation for a wasm trap.
  static FlagsContinuation ForTrap(FlagsCondition condition, TrapId trap_id,
                                   Node* result) {
    return FlagsContinuation(condition, trap_id, result);
  }

  bool IsNone() const { return mode_ == kFlags_none; }
  bool IsBranch() const {
    return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
  }
  bool IsDeoptimize() const {
    return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
  }
  bool IsPoisoned() const {
    return mode_ == kFlags_branch_and_poison ||
           mode_ == kFlags_deoptimize_and_poison;
  }
  bool IsSet() const { return mode_ == kFlags_set; }
  bool IsTrap() const { return mode_ == kFlags_trap; }
  FlagsCondition condition() const {
    DCHECK(!IsNone());
    return condition_;
  }
  DeoptimizeKind kind() const {
    DCHECK(IsDeoptimize());
    return kind_;
  }
  DeoptimizeReason reason() const {
    DCHECK(IsDeoptimize());
    return reason_;
  }
  FeedbackSource const& feedback() const {
    DCHECK(IsDeoptimize());
    return feedback_;
  }
  Node* frame_state() const {
    DCHECK(IsDeoptimize());
    return frame_state_or_result_;
  }
  Node* result() const {
    DCHECK(IsSet());
    return frame_state_or_result_;
  }
  TrapId trap_id() const {
    DCHECK(IsTrap());
    return trap_id_;
  }
  BasicBlock* true_block() const {
    DCHECK(IsBranch());
    return true_block_;
  }
  BasicBlock* false_block() const {
    DCHECK(IsBranch());
    return false_block_;
  }

  void Negate() {
    DCHECK(!IsNone());
    condition_ = NegateFlagsCondition(condition_);
  }

  void Commute() {
    DCHECK(!IsNone());
    condition_ = CommuteFlagsCondition(condition_);
  }

  void Overwrite(FlagsCondition condition) { condition_ = condition; }

  void OverwriteAndNegateIfEqual(FlagsCondition condition) {
    DCHECK(condition_ == kEqual || condition_ == kNotEqual);
    bool negate = condition_ == kEqual;
    condition_ = condition;
    if (negate) Negate();
  }

  void OverwriteUnsignedIfSigned() {
    switch (condition_) {
      case kSignedLessThan:
        condition_ = kUnsignedLessThan;
        break;
      case kSignedLessThanOrEqual:
        condition_ = kUnsignedLessThanOrEqual;
        break;
      case kSignedGreaterThan:
        condition_ = kUnsignedGreaterThan;
        break;
      case kSignedGreaterThanOrEqual:
        condition_ = kUnsignedGreaterThanOrEqual;
        break;
      default:
        break;
    }
  }
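
  // Rewrite sketch (illustrative): while matching a machine-level compare, a
  // backend typically replaces the generic kEqual/kNotEqual condition with
  // the condition the hardware flags actually encode, preserving polarity:
  //
  //   cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);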

  // Encodes this flags continuation into the given opcode.
  InstructionCode Encode(InstructionCode opcode) {
    opcode |= FlagsModeField::encode(mode_);
    if (mode_ != kFlags_none) {
      opcode |= FlagsConditionField::encode(condition_);
    }
    return opcode;
  }

 private:
  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
                    BasicBlock* true_block, BasicBlock* false_block)
      : mode_(mode),
        condition_(condition),
        true_block_(true_block),
        false_block_(false_block) {
    DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
    DCHECK_NOT_NULL(true_block);
    DCHECK_NOT_NULL(false_block);
  }

  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
                    DeoptimizeKind kind, DeoptimizeReason reason,
                    FeedbackSource const& feedback, Node* frame_state)
      : mode_(mode),
        condition_(condition),
        kind_(kind),
        reason_(reason),
        feedback_(feedback),
        frame_state_or_result_(frame_state) {
    DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
    DCHECK_NOT_NULL(frame_state);
  }

  FlagsContinuation(FlagsCondition condition, Node* result)
      : mode_(kFlags_set),
        condition_(condition),
        frame_state_or_result_(result) {
    DCHECK_NOT_NULL(result);
  }

  FlagsContinuation(FlagsCondition condition, TrapId trap_id, Node* result)
      : mode_(kFlags_trap),
        condition_(condition),
        frame_state_or_result_(result),
        trap_id_(trap_id) {
    DCHECK_NOT_NULL(result);
  }

  FlagsMode const mode_;
  FlagsCondition condition_;
  DeoptimizeKind kind_;          // Only valid if mode_ == kFlags_deoptimize*
  DeoptimizeReason reason_;      // Only valid if mode_ == kFlags_deoptimize*
  FeedbackSource feedback_;      // Only valid if mode_ == kFlags_deoptimize*
  Node* frame_state_or_result_;  // Only valid if mode_ == kFlags_deoptimize*
                                 // or mode_ == kFlags_set.
  BasicBlock* true_block_;       // Only valid if mode_ == kFlags_branch*.
  BasicBlock* false_block_;      // Only valid if mode_ == kFlags_branch*.
  TrapId trap_id_;               // Only valid if mode_ == kFlags_trap.
};

// This struct connects the nodes of parameters that are going to be pushed on
// the call stack with their parameter index in the call descriptor of the
// callee.
struct PushParameter {
  PushParameter(Node* n = nullptr,
                LinkageLocation l = LinkageLocation::ForAnyRegister())
      : node(n), location(l) {}

  Node* node;
  LinkageLocation location;
};
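
// Illustrative example (argument details elided): for a call with one
// register argument r and one stack argument s, the argument buffer could
// contain
//   { PushParameter(r, LinkageLocation::ForRegister(...)),
//     PushParameter(s, LinkageLocation::ForCallerFrameSlot(...)) }
// which InstructionSelector::EmitPrepareArguments (declared below) turns into
// the required moves and pushes.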

enum class FrameStateInputKind { kAny, kStackSlot };

// Instruction selection generates an InstructionSequence for a given Schedule.
class V8_EXPORT_PRIVATE InstructionSelector final {
 public:
  // Forward declarations.
  class Features;

  enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
  enum EnableScheduling { kDisableScheduling, kEnableScheduling };
  enum EnableRootsRelativeAddressing {
    kDisableRootsRelativeAddressing,
    kEnableRootsRelativeAddressing
  };
  enum EnableSwitchJumpTable {
    kDisableSwitchJumpTable,
    kEnableSwitchJumpTable
  };
  enum EnableTraceTurboJson { kDisableTraceTurboJson, kEnableTraceTurboJson };

  InstructionSelector(
      Zone* zone, size_t node_count, Linkage* linkage,
      InstructionSequence* sequence, Schedule* schedule,
      SourcePositionTable* source_positions, Frame* frame,
      EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
      JSHeapBroker* broker, size_t* max_unoptimized_frame_height,
      size_t* max_pushed_argument_count,
      SourcePositionMode source_position_mode = kCallSourcePositions,
      Features features = SupportedFeatures(),
      EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
                                               ? kEnableScheduling
                                               : kDisableScheduling,
      EnableRootsRelativeAddressing enable_roots_relative_addressing =
          kDisableRootsRelativeAddressing,
      PoisoningMitigationLevel poisoning_level =
          PoisoningMitigationLevel::kDontPoison,
      EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);

  // Visit code for the entire graph with the included schedule.
  bool SelectInstructions();

  void StartBlock(RpoNumber rpo);
  void EndBlock(RpoNumber rpo);
  void AddInstruction(Instruction* instr);
  void AddTerminator(Instruction* instr);

  // ===========================================================================
  // ============= Architecture-independent code emission methods. =============
  // ===========================================================================

  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, InstructionOperand f,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, size_t output_count,
                    InstructionOperand* outputs, size_t input_count,
                    InstructionOperand* inputs, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
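
  // Typical use from an architecture backend (illustrative; the opcode is a
  // placeholder, the operand-generator calls are the common pattern):
  //
  //   OperandGenerator g(this);
  //   Emit(kSomeArchAdd, g.DefineAsRegister(node),
  //        g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  //
  // The OperandGenerator builds the InstructionOperands that carry the
  // register-allocation constraints for the emitted instruction.
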
  Instruction* Emit(Instruction* instr);

  // [0-3] operand instructions with no output; these use the labels of the
  // continuation's true and false blocks.
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a, InstructionOperand b,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a, InstructionOperand b,
                                    InstructionOperand c,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode, size_t output_count,
                                    InstructionOperand* outputs,
                                    size_t input_count,
                                    InstructionOperand* inputs,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(
      InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
      size_t input_count, InstructionOperand* inputs, size_t temp_count,
      InstructionOperand* temps, FlagsContinuation* cont);

  void EmitIdentity(Node* node);

  // ===========================================================================
  // ============== Architecture-independent CPU feature methods. ==============
  // ===========================================================================

  class Features final {
   public:
    Features() : bits_(0) {}
    explicit Features(unsigned bits) : bits_(bits) {}
    explicit Features(CpuFeature f) : bits_(1u << f) {}
    Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}

    bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }

   private:
    unsigned bits_;
  };

  bool IsSupported(CpuFeature feature) const {
    return features_.Contains(feature);
  }

  // Returns the features supported on the target platform.
  static Features SupportedFeatures() {
    return Features(CpuFeatures::SupportedFeatures());
  }

  // TODO(sigurds) This should take a CpuFeatures argument.
  static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();

  static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();

  bool NeedsPoisoning(IsSafetyCheck safety_check) const;

  // ===========================================================================
  // ============ Architecture-independent graph covering methods. =============
  // ===========================================================================

  // Used in pattern matching during code generation.
  // Check if {node} can be covered while generating code for the current
  // instruction. A node can be covered if the {user} of the node has the only
  // edge to it and the two are in the same basic block.
  bool CanCover(Node* user, Node* node) const;
  // CanCover is not transitive. A counterexample is nodes A, B, C such that
  // CanCover(A, B) and CanCover(B, C) while B is pure: then the effect levels
  // of A and B might differ. CanCoverTransitively does the additional checks.
  bool CanCoverTransitively(Node* user, Node* node, Node* node_input) const;
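
  // Covering example (illustrative): given
  //   l = Load[base + k]
  //   a = Int32Add(x, l)
  // if CanCover(a, l) holds, the backend may fold the load into the add and
  // select one instruction with a memory operand instead of a separate load.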

  // Used in pattern matching during code generation.
  // This function checks that {node} and {user} are in the same basic block,
  // and that {user} is the only user of {node} in this basic block. This
  // check guarantees that there are no users of {node} scheduled between
  // {node} and {user}, and thus we can select a single instruction for both
  // nodes, if such an instruction exists. This check can be used for example
  // when selecting instructions for:
  //   n = Int32Add(a, b)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // Here we can generate a flag-setting add instruction, even if the add has
  // uses in other basic blocks, since the flag-setting add instruction will
  // still generate the result of the addition and not just set the flags.
  // However, if we had uses of the add in the same basic block, we could have:
  //   n = Int32Add(a, b)
  //   o = OtherOp(n, ...)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // where we cannot select the add and the compare together. If we were to
  // select a flag-setting add instruction for Word32Compare and Int32Add while
  // visiting Word32Compare, we would then have to select an instruction for
  // OtherOp *afterwards*, which means we would attempt to use the result of
  // the add before we have defined it.
  bool IsOnlyUserOfNodeInSameBlock(Node* user, Node* node) const;

  // Checks if {node} was already defined, and therefore code was already
  // generated for it.
  bool IsDefined(Node* node) const;

  // Checks if {node} has any uses, and therefore code has to be generated for
  // it.
  bool IsUsed(Node* node) const;

  // Checks if {node} is currently live.
  bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }

  // Gets the effect level of {node}.
  int GetEffectLevel(Node* node) const;

  // Gets the effect level of {node}, appropriately adjusted based on
  // continuation flags if the node is a branch.
  int GetEffectLevel(Node* node, FlagsContinuation* cont) const;

  int GetVirtualRegister(const Node* node);
  const std::map<NodeId, int> GetVirtualRegistersForTesting() const;

  // Check if we can generate loads and stores of ExternalConstants relative
  // to the roots register.
  bool CanAddressRelativeToRootsRegister(
      const ExternalReference& reference) const;
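
  // Illustrative note: when the predicate above holds, a load of such an
  // external reference can use a [kRootRegister + offset] operand instead of
  // first materializing the full external address in a register.
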
  // Check if we can use the roots register to access GC roots.
  bool CanUseRootsRegister() const;

  Isolate* isolate() const { return sequence()->isolate(); }

  const ZoneVector<std::pair<int, int>>& instr_origins() const {
    return instr_origins_;
  }

 private:
  friend class OperandGenerator;

  bool UseInstructionScheduling() const {
    return (enable_scheduling_ == kEnableScheduling) &&
           InstructionScheduler::SchedulerSupported();
  }

  void AppendDeoptimizeArguments(InstructionOperandVector* args,
                                 DeoptimizeKind kind, DeoptimizeReason reason,
                                 FeedbackSource const& feedback,
                                 Node* frame_state);

  void EmitTableSwitch(const SwitchInfo& sw,
                       InstructionOperand const& index_operand);
  void EmitBinarySearchSwitch(const SwitchInfo& sw,
                              InstructionOperand const& value_operand);

  void TryRename(InstructionOperand* op);
  int GetRename(int virtual_register);
  void SetRename(const Node* node, const Node* rename);
  void UpdateRenames(Instruction* instruction);
  void UpdateRenamesInPhi(PhiInstruction* phi);

  // Inform the instruction selection that {node} was just defined.
  void MarkAsDefined(Node* node);

  // Inform the instruction selection that {node} has at least one use and we
  // will need to generate code for it.
  void MarkAsUsed(Node* node);

  // Sets the effect level of {node}.
  void SetEffectLevel(Node* node, int effect_level);

  // Inform the register allocation of the representation of the value produced
  // by {node}.
  void MarkAsRepresentation(MachineRepresentation rep, Node* node);
  void MarkAsWord32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord32, node);
  }
  void MarkAsWord64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord64, node);
  }
  void MarkAsFloat32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat32, node);
  }
  void MarkAsFloat64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat64, node);
  }
  void MarkAsSimd128(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kSimd128, node);
  }
  void MarkAsTagged(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kTagged, node);
  }
  void MarkAsCompressed(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kCompressed, node);
  }

  // Inform the register allocation of the representation of the unallocated
  // operand {op}.
  void MarkAsRepresentation(MachineRepresentation rep,
                            const InstructionOperand& op);

  enum CallBufferFlag {
    kCallCodeImmediate = 1u << 0,
    kCallAddressImmediate = 1u << 1,
    kCallTail = 1u << 2,
    kCallFixedTargetRegister = 1u << 3
  };
  using CallBufferFlags = base::Flags<CallBufferFlag>;
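
  // Flags combine bitwise, e.g. (illustrative) a tail call with a code-object
  // target could be described as:
  //   CallBufferFlags flags(kCallCodeImmediate | kCallTail);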

  // Initialize the call buffer with the InstructionOperands, nodes, etc.
  // corresponding to the inputs and outputs of the call.
  // {call_code_immediate} to generate immediate operands to calls of code.
  // {call_address_immediate} to generate immediate operands to address calls.
  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                            CallBufferFlags flags, bool is_tail_call,
                            int stack_slot_delta = 0);
  bool IsTailCallAddressImmediate();
  int GetTempsCountForTailCallFromJSFunction();

  void UpdateMaxPushedArgumentCount(size_t count);

  FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
  size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
                                         Node* state, OperandGenerator* g,
                                         StateObjectDeduplicator* deduplicator,
                                         InstructionOperandVector* inputs,
                                         FrameStateInputKind kind, Zone* zone);
  size_t AddInputsToFrameStateDescriptor(StateValueList* values,
                                         InstructionOperandVector* inputs,
                                         OperandGenerator* g,
                                         StateObjectDeduplicator* deduplicator,
                                         Node* node, FrameStateInputKind kind,
                                         Zone* zone);
  size_t AddOperandToStateValueDescriptor(StateValueList* values,
                                          InstructionOperandVector* inputs,
                                          OperandGenerator* g,
                                          StateObjectDeduplicator* deduplicator,
                                          Node* input, MachineType type,
                                          FrameStateInputKind kind, Zone* zone);

  // ===========================================================================
  // ============= Architecture-specific graph covering methods. ===============
  // ===========================================================================

  // Visit nodes in the given block and generate code.
  void VisitBlock(BasicBlock* block);

  // Visit the node for the control flow at the end of the block, generating
  // code if necessary.
  void VisitControl(BasicBlock* block);

  // Visit the node and generate code, if any.
  void VisitNode(Node* node);

  // Visit the node and generate code for IEEE 754 functions.
  void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
  void VisitFloat64Ieee754Unop(Node*, InstructionCode code);

#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
  MACHINE_OP_LIST(DECLARE_GENERATOR)
  MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
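
  // The macros above expand to one visitor declaration per machine operator,
  // e.g.:
  //   void VisitWord32And(Node* node);
  //   void VisitFloat64Add(Node* node);
  // The definitions are provided by each architecture backend.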

  // Visit the load node with a value and opcode to replace with.
  void VisitLoad(Node* node, Node* value, InstructionCode opcode);
  void VisitLoadTransform(Node* node, Node* value, InstructionCode opcode);
  void VisitFinishRegion(Node* node);
  void VisitParameter(Node* node);
  void VisitIfException(Node* node);
  void VisitOsrValue(Node* node);
  void VisitPhi(Node* node);
  void VisitProjection(Node* node);
  void VisitConstant(Node* node);
  void VisitCall(Node* call, BasicBlock* handler = nullptr);
  void VisitDeoptimizeIf(Node* node);
  void VisitDeoptimizeUnless(Node* node);
  void VisitTrapIf(Node* node, TrapId trap_id);
  void VisitTrapUnless(Node* node, TrapId trap_id);
  void VisitTailCall(Node* call);
  void VisitGoto(BasicBlock* target);
  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
  void VisitSwitch(Node* node, const SwitchInfo& sw);
  void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
                       FeedbackSource const& feedback, Node* frame_state);
  void VisitReturn(Node* ret);
  void VisitThrow(Node* node);
  void VisitRetain(Node* node);
  void VisitUnreachable(Node* node);
  void VisitStaticAssert(Node* node);
  void VisitDeadValue(Node* node);

  void VisitStackPointerGreaterThan(Node* node, FlagsContinuation* cont);

  void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);

  void EmitWordPoisonOnSpeculation(Node* node);

  void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
                            const CallDescriptor* call_descriptor, Node* node);
  void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
                          const CallDescriptor* call_descriptor, Node* node);

  bool CanProduceSignalingNaN(Node* node);

  // ===========================================================================
  // ============= Vector instruction (SIMD) helper fns. =======================
  // ===========================================================================

  // Canonicalize shuffles to make pattern matching simpler. Returns (via the
  // out-parameters) the shuffle indices, and a boolean indicating if the
  // shuffle is a swizzle (one input).
  void CanonicalizeShuffle(Node* node, uint8_t* shuffle, bool* is_swizzle);
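
  // Example (illustrative): a 16-lane byte shuffle whose indices all select
  // from the first input, e.g. {1, 0, 3, 2, ..., 15, 14}, is reported as a
  // swizzle (*is_swizzle == true), so matching only considers one input.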

  // Swaps the two first input operands of the node, to help match shuffles
  // to specific architectural instructions.
  void SwapShuffleInputs(Node* node);

  // ===========================================================================

  Schedule* schedule() const { return schedule_; }
  Linkage* linkage() const { return linkage_; }
  InstructionSequence* sequence() const { return sequence_; }
  Zone* instruction_zone() const { return sequence()->zone(); }
  Zone* zone() const { return zone_; }

  void set_instruction_selection_failed() {
    instruction_selection_failed_ = true;
  }
  bool instruction_selection_failed() { return instruction_selection_failed_; }

  void MarkPairProjectionsAsWord32(Node* node);
  bool IsSourcePositionUsed(Node* node);
  void VisitWord32AtomicBinaryOperation(Node* node, ArchOpcode int8_op,
                                        ArchOpcode uint8_op,
                                        ArchOpcode int16_op,
                                        ArchOpcode uint16_op,
                                        ArchOpcode word32_op);
  void VisitWord64AtomicBinaryOperation(Node* node, ArchOpcode uint8_op,
                                        ArchOpcode uint16_op,
                                        ArchOpcode uint32_op,
                                        ArchOpcode uint64_op);
  void VisitWord64AtomicNarrowBinop(Node* node, ArchOpcode uint8_op,
                                    ArchOpcode uint16_op, ArchOpcode uint32_op);

#if V8_TARGET_ARCH_64_BIT
  bool ZeroExtendsWord32ToWord64(Node* node, int recursion_depth = 0);
  bool ZeroExtendsWord32ToWord64NoPhis(Node* node);

  enum Upper32BitsState : uint8_t {
    kNotYetChecked,
    kUpperBitsGuaranteedZero,
    kNoGuarantee,
  };
#endif  // V8_TARGET_ARCH_64_BIT

  // ===========================================================================

  Zone* const zone_;
  Linkage* const linkage_;
  InstructionSequence* const sequence_;
  SourcePositionTable* const source_positions_;
  SourcePositionMode const source_position_mode_;
  Features features_;
  Schedule* const schedule_;
  BasicBlock* current_block_;
  ZoneVector<Instruction*> instructions_;
  InstructionOperandVector continuation_inputs_;
  InstructionOperandVector continuation_outputs_;
  InstructionOperandVector continuation_temps_;
  BoolVector defined_;
  BoolVector used_;
  IntVector effect_level_;
  IntVector virtual_registers_;
  IntVector virtual_register_rename_;
  InstructionScheduler* scheduler_;
  EnableScheduling enable_scheduling_;
  EnableRootsRelativeAddressing enable_roots_relative_addressing_;
  EnableSwitchJumpTable enable_switch_jump_table_;

  PoisoningMitigationLevel poisoning_level_;
  Frame* frame_;
  bool instruction_selection_failed_;
  ZoneVector<std::pair<int, int>> instr_origins_;
  EnableTraceTurboJson trace_turbo_;
  TickCounter* const tick_counter_;
  // The broker is only used for unparking the LocalHeap for diagnostic
  // printing for failed StaticAsserts.
  JSHeapBroker* const broker_;

  // Store the maximal unoptimized frame height and the maximal number of
  // pushed arguments (for calls). Later used to apply an offset to stack
  // checks.
  size_t* max_unoptimized_frame_height_;
  size_t* max_pushed_argument_count_;

#if V8_TARGET_ARCH_64_BIT
  // Holds lazily-computed results for whether phi nodes guarantee their upper
  // 32 bits to be zero. Indexed by node ID; nobody reads or writes the values
  // for non-phi nodes.
  ZoneVector<Upper32BitsState> phi_states_;
#endif
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_