// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_CODE_GENERATOR_H_
#define V8_COMPILER_BACKEND_CODE_GENERATOR_H_

#include <memory>

#include "src/base/optional.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/safepoint-table.h"
#include "src/codegen/source-position-table.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/unwinding-info-writer.h"
#include "src/compiler/osr.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/trap-handler/trap-handler.h"

namespace v8 {
namespace internal {

class OptimizedCompilationInfo;

namespace compiler {

// Forward declarations.
class DeoptimizationExit;
class FrameAccessState;
class Linkage;
class OutOfLineCode;

struct BranchInfo {
  FlagsCondition condition;
  Label* true_label;
  Label* false_label;
  bool fallthru;
};

class InstructionOperandIterator {
 public:
  InstructionOperandIterator(Instruction* instr, size_t pos)
      : instr_(instr), pos_(pos) {}

  Instruction* instruction() const { return instr_; }
  InstructionOperand* Advance() { return instr_->InputAt(pos_++); }

 private:
  Instruction* instr_;
  size_t pos_;
};

enum class DeoptimizationLiteralKind { kObject, kNumber, kString, kInvalid };

// Either a non-null Handle<Object>, a double or a StringConstantBase.
class DeoptimizationLiteral {
 public:
  DeoptimizationLiteral()
      : kind_(DeoptimizationLiteralKind::kInvalid),
        object_(),
        number_(0),
        string_(nullptr) {}
  explicit DeoptimizationLiteral(Handle<Object> object)
      : kind_(DeoptimizationLiteralKind::kObject), object_(object) {
    CHECK(!object_.is_null());
  }
  explicit DeoptimizationLiteral(double number)
      : kind_(DeoptimizationLiteralKind::kNumber), number_(number) {}
  explicit DeoptimizationLiteral(const StringConstantBase* string)
      : kind_(DeoptimizationLiteralKind::kString), string_(string) {}

  Handle<Object> object() const { return object_; }
  const StringConstantBase* string() const { return string_; }

  bool operator==(const DeoptimizationLiteral& other) const {
    return kind_ == other.kind_ && object_.equals(other.object_) &&
           bit_cast<uint64_t>(number_) == bit_cast<uint64_t>(other.number_) &&
           bit_cast<intptr_t>(string_) == bit_cast<intptr_t>(other.string_);
  }

  Handle<Object> Reify(Isolate* isolate) const;

  void Validate() const {
    CHECK_NE(kind_, DeoptimizationLiteralKind::kInvalid);
  }

  DeoptimizationLiteralKind kind() const {
    Validate();
    return kind_;
  }

 private:
  DeoptimizationLiteralKind kind_;

  Handle<Object> object_;
  double number_ = 0;
  const StringConstantBase* string_ = nullptr;
};
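
// A minimal usage sketch for DeoptimizationLiteral, assuming an Isolate*
// named `isolate` and a non-null Handle<Object> named `handle` are in scope
// (both are placeholders, not declared in this header):
//
//   DeoptimizationLiteral num(0.5);              // kind() == kNumber
//   DeoptimizationLiteral obj(handle);           // kind() == kObject
//   Handle<Object> value = num.Reify(isolate);   // materializes the literal
//
// A default-constructed literal is kInvalid and fails Validate()/kind().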

// These structs hold pc offsets for generated instructions and are only used
// when tracing for turbolizer is enabled.
struct TurbolizerCodeOffsetsInfo {
  int code_start_register_check = -1;
  int deopt_check = -1;
  int init_poison = -1;
  int blocks_start = -1;
  int out_of_line_code = -1;
  int deoptimization_exits = -1;
  int pools = -1;
  int jump_tables = -1;
};

struct TurbolizerInstructionStartInfo {
  int gap_pc_offset = -1;
  int arch_instr_pc_offset = -1;
  int condition_pc_offset = -1;
};

// Generates native code for a sequence of instructions.
class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
 public:
  explicit CodeGenerator(
      Zone* codegen_zone, Frame* frame, Linkage* linkage,
      InstructionSequence* instructions, OptimizedCompilationInfo* info,
      Isolate* isolate, base::Optional<OsrHelper> osr_helper,
      int start_source_position, JumpOptimizationInfo* jump_opt,
      PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
      int32_t builtin_index, size_t max_unoptimized_frame_height,
      size_t max_pushed_argument_count, std::unique_ptr<AssemblerBuffer> = {},
      const char* debug_name = nullptr);

  // Generate native code. After calling AssembleCode, call FinalizeCode to
  // produce the actual code object. If an error occurs during either phase,
  // FinalizeCode returns an empty MaybeHandle.
  void AssembleCode();  // Does not need to run on main thread.
  MaybeHandle<Code> FinalizeCode();
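
  // A minimal calling-sequence sketch, assuming a fully constructed
  // CodeGenerator named `generator` (the actual driver lives in the
  // compilation pipeline, not in this header):
  //
  //   generator.AssembleCode();                              // any thread
  //   MaybeHandle<Code> code = generator.FinalizeCode();     // main thread
  //   if (code.is_null()) { /* assembly or finalization failed */ }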

  OwnedVector<byte> GetSourcePositionTable();
  OwnedVector<byte> GetProtectedInstructionsData();

  InstructionSequence* instructions() const { return instructions_; }
  FrameAccessState* frame_access_state() const { return frame_access_state_; }
  const Frame* frame() const { return frame_access_state_->frame(); }
  Isolate* isolate() const { return isolate_; }
  Linkage* linkage() const { return linkage_; }

  Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }

  void AddProtectedInstructionLanding(uint32_t instr_offset,
                                      uint32_t landing_offset);

  bool wasm_runtime_exception_support() const;

  SourcePosition start_source_position() const {
    return start_source_position_;
  }

  void AssembleSourcePosition(Instruction* instr);
  void AssembleSourcePosition(SourcePosition source_position);

  // Record a safepoint with the given pointer map.
  void RecordSafepoint(ReferenceMap* references,
                       Safepoint::DeoptMode deopt_mode);

  Zone* zone() const { return zone_; }
  TurboAssembler* tasm() { return &tasm_; }
  SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; }
  size_t GetSafepointTableOffset() const { return safepoints_.GetCodeOffset(); }
  size_t GetHandlerTableOffset() const { return handler_table_offset_; }

  const ZoneVector<int>& block_starts() const { return block_starts_; }
  const ZoneVector<TurbolizerInstructionStartInfo>& instr_starts() const {
    return instr_starts_;
  }

  const TurbolizerCodeOffsetsInfo& offsets_info() const {
    return offsets_info_;
  }

  static constexpr int kBinarySearchSwitchMinimalCases = 4;

  // Returns true if an offset should be applied to the given stack check. There
  // are two reasons that this could happen:
  // 1. The optimized frame is smaller than the corresponding deoptimized frames
  //    and an offset must be applied in order to be able to deopt safely.
  // 2. The current function pushes a large number of arguments to the stack.
  //    These are not accounted for by the initial frame setup.
  bool ShouldApplyOffsetToStackCheck(Instruction* instr, uint32_t* offset);
  uint32_t GetStackCheckOffset();

 private:
  GapResolver* resolver() { return &resolver_; }
  SafepointTableBuilder* safepoints() { return &safepoints_; }
  OptimizedCompilationInfo* info() const { return info_; }
  OsrHelper* osr_helper() { return &(*osr_helper_); }

  // Create the FrameAccessState object. The Frame is immutable from here on.
  void CreateFrameAccessState(Frame* frame);

  // Architecture-specific frame finalization.
  void FinishFrame(Frame* frame);

  // Checks if {block} will appear directly after {current_block_} when
  // assembling code, in which case a fall-through can be used.
  bool IsNextInAssemblyOrder(RpoNumber block) const;

  // Check if a heap object can be materialized by loading from a heap root,
  // which is cheaper on some platforms than materializing the actual heap
  // object constant.
  bool IsMaterializableFromRoot(Handle<HeapObject> object,
                                RootIndex* index_return);

  enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };

  // Assemble instructions for the specified block.
  CodeGenResult AssembleBlock(const InstructionBlock* block);

  // Inserts a mask update at the beginning of an instruction block if the
  // predecessor block ends with a masking branch.
  void TryInsertBranchPoisoning(const InstructionBlock* block);

  // Initializes the masking register in the prologue of a function.
  void InitializeSpeculationPoison();
  // Resets the masking register during execution of a function.
  void ResetSpeculationPoison();
  // Generates a mask from the pc passed in {kJavaScriptCallCodeStartRegister}.
  void GenerateSpeculationPoisonFromCodeStartRegister();

  // Assemble code for the specified instruction.
  CodeGenResult AssembleInstruction(int instruction_index,
                                    const InstructionBlock* block);
  void AssembleGaps(Instruction* instr);

  // Computes branch info from the given instruction. Returns a valid rpo
  // number only if the branch is redundant; in that case the returned rpo
  // number points to the target basic block.
  RpoNumber ComputeBranchInfo(BranchInfo* branch, Instruction* instr);

  // Returns true if an instruction is a tail call that needs to adjust the
  // stack pointer before execution. The stack slot index to the empty slot
  // above the adjusted stack pointer is returned in |slot|.
  bool GetSlotAboveSPBeforeTailCall(Instruction* instr, int* slot);

  // Determines how to call helper stubs depending on the code kind.
  StubCallMode DetermineStubCallMode() const;

  CodeGenResult AssembleDeoptimizerCall(DeoptimizationExit* exit);

  // ===========================================================================
  // ============= Architecture-specific code generation methods. ==============
  // ===========================================================================

  CodeGenResult AssembleArchInstruction(Instruction* instr);
  void AssembleArchJump(RpoNumber target);
  void AssembleArchBranch(Instruction* instr, BranchInfo* branch);

  // Generates special branch for deoptimization condition.
  void AssembleArchDeoptBranch(Instruction* instr, BranchInfo* branch);

  void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
  void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
  void AssembleArchBinarySearchSwitchRange(Register input, RpoNumber def_block,
                                           std::pair<int32_t, Label*>* begin,
                                           std::pair<int32_t, Label*>* end);
  void AssembleArchBinarySearchSwitch(Instruction* instr);
  void AssembleArchTableSwitch(Instruction* instr);

  // Generates code that checks whether the {kJavaScriptCallCodeStartRegister}
  // contains the expected pointer to the start of the instruction stream.
  void AssembleCodeStartRegisterCheck();

  void AssembleBranchPoisoning(FlagsCondition condition, Instruction* instr);

  // When entering code that is marked for deoptimization, rather than
  // continuing with its execution, we jump to lazily compiled code. We need to
  // do this because the code has already been deoptimized and needs to be
  // unlinked from the JS functions referring to it.
  void BailoutIfDeoptimized();

  // Generates code to poison the stack pointer and implicit register arguments
  // like the context register and the function register.
  void AssembleRegisterArgumentPoisoning();

  // Generates an architecture-specific, descriptor-specific prologue
  // to set up a stack frame.
  void AssembleConstructFrame();

  // Generates an architecture-specific, descriptor-specific return sequence
  // to tear down a stack frame.
  void AssembleReturn(InstructionOperand* pop);

  void AssembleDeconstructFrame();

  // Generates code to manipulate the stack in preparation for a tail call.
  void AssemblePrepareTailCall();

  // Generates code to pop current frame if it is an arguments adaptor frame.
  void AssemblePopArgumentsAdaptorFrame(Register args_reg, Register scratch1,
                                        Register scratch2, Register scratch3);

  enum PushTypeFlag {
    kImmediatePush = 0x1,
    kRegisterPush = 0x2,
    kStackSlotPush = 0x4,
    kScalarPush = kRegisterPush | kStackSlotPush
  };
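
  // Note that kScalarPush combines only the register and stack-slot flags, so
  // immediate operands never qualify as "scalar" push candidates.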

  using PushTypeFlags = base::Flags<PushTypeFlag>;

  static bool IsValidPush(InstructionOperand source, PushTypeFlags push_type);

  // Generate a list of moves from an instruction that are candidates to be
  // turned into push instructions on platforms that support them. In general,
  // the list of push candidates consists of moves to a set of contiguous
  // destination InstructionOperand locations on the stack that don't clobber
  // values that are needed to resolve the gap or use values generated by the
  // gap, i.e. moves that can be hoisted together before the actual gap and
  // assembled together.
  static void GetPushCompatibleMoves(Instruction* instr,
                                     PushTypeFlags push_type,
                                     ZoneVector<MoveOperands*>* pushes);

  class MoveType {
   public:
    enum Type {
      kRegisterToRegister,
      kRegisterToStack,
      kStackToRegister,
      kStackToStack,
      kConstantToRegister,
      kConstantToStack
    };

    // Detect what type of move or swap needs to be performed. Note that these
    // functions do not take into account the representation (Tagged, FP,
    // etc.).
    static Type InferMove(InstructionOperand* source,
                          InstructionOperand* destination);
    static Type InferSwap(InstructionOperand* source,
                          InstructionOperand* destination);
  };
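
  // For example, InferMove classifies a move with a register source and a
  // stack-slot destination as kRegisterToStack; the architecture-specific
  // AssembleMove/AssembleSwap implementations can then dispatch on this
  // classification when emitting the actual instructions.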

  // Called before a tail call |instr|'s gap moves are assembled and allows
  // gap-specific pre-processing, e.g. adjustment of the sp for tail calls that
  // need it before gap moves or conversion of certain gap moves into pushes.
  void AssembleTailCallBeforeGap(Instruction* instr,
                                 int first_unused_stack_slot);
  // Called after a tail call |instr|'s gap moves are assembled and allows
  // gap-specific post-processing, e.g. adjustment of the sp for tail calls that
  // need it after gap moves.
  void AssembleTailCallAfterGap(Instruction* instr,
                                int first_unused_stack_slot);

  void FinishCode();
  void MaybeEmitOutOfLineConstantPool();

  void IncrementStackAccessCounter(InstructionOperand* source,
                                   InstructionOperand* destination);

  // ===========================================================================
  // ============== Architecture-specific gap resolver methods. ================
  // ===========================================================================

  // Interface used by the gap resolver to emit moves and swaps.
  void AssembleMove(InstructionOperand* source,
                    InstructionOperand* destination) final;
  void AssembleSwap(InstructionOperand* source,
                    InstructionOperand* destination) final;

  // ===========================================================================
  // =================== Jump table construction methods. ======================
  // ===========================================================================

  class JumpTable;
  // Adds a jump table that is emitted after the actual code.  Returns a label
  // pointing to the beginning of the table.  {targets} is assumed to be static
  // or zone allocated.
  Label* AddJumpTable(Label** targets, size_t target_count);
  // Emits a jump table.
  void AssembleJumpTable(Label** targets, size_t target_count);

  // ===========================================================================
  // ================== Deoptimization table construction. =====================
  // ===========================================================================

  void RecordCallPosition(Instruction* instr);
  Handle<DeoptimizationData> GenerateDeoptimizationData();
  int DefineDeoptimizationLiteral(DeoptimizationLiteral literal);
  DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
                                                    size_t frame_state_offset);
  DeoptimizationExit* BuildTranslation(Instruction* instr, int pc_offset,
                                       size_t frame_state_offset,
                                       OutputFrameStateCombine state_combine);
  void BuildTranslationForFrameStateDescriptor(
      FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
      Translation* translation, OutputFrameStateCombine state_combine);
  void TranslateStateValueDescriptor(StateValueDescriptor* desc,
                                     StateValueList* nested,
                                     Translation* translation,
                                     InstructionOperandIterator* iter);
  void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
                                             InstructionOperandIterator* iter,
                                             Translation* translation);
  void AddTranslationForOperand(Translation* translation, Instruction* instr,
                                InstructionOperand* op, MachineType type);
  void MarkLazyDeoptSite();

  void PrepareForDeoptimizationExits(ZoneDeque<DeoptimizationExit*>* exits);
  DeoptimizationExit* AddDeoptimizationExit(Instruction* instr,
                                            size_t frame_state_offset);

  // ===========================================================================

  struct HandlerInfo {
    Label* handler;
    int pc_offset;
  };

  friend class OutOfLineCode;
  friend class CodeGeneratorTester;

  Zone* zone_;
  Isolate* isolate_;
  FrameAccessState* frame_access_state_;
  Linkage* const linkage_;
  InstructionSequence* const instructions_;
  UnwindingInfoWriter unwinding_info_writer_;
  OptimizedCompilationInfo* const info_;
  Label* const labels_;
  Label return_label_;
  RpoNumber current_block_;
  SourcePosition start_source_position_;
  SourcePosition current_source_position_;
  TurboAssembler tasm_;
  GapResolver resolver_;
  SafepointTableBuilder safepoints_;
  ZoneVector<HandlerInfo> handlers_;
  int next_deoptimization_id_ = 0;
  int deopt_exit_start_offset_ = 0;
  int non_lazy_deopt_count_ = 0;
  ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
  ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
  size_t inlined_function_count_ = 0;
  TranslationBuffer translations_;
  int handler_table_offset_ = 0;
  int last_lazy_deopt_pc_ = 0;

  // Deoptimization exits must be as small as possible, since their count grows
  // with function size. {jump_deoptimization_entry_labels_} is an optimization
  // to that effect, which extracts the (potentially large) instruction
  // sequence for the final jump to the deoptimization entry into a single spot
  // per Code object. All deopt exits can then near-call to this label. Note:
  // not used on all architectures.
  Label jump_deoptimization_entry_labels_[kDeoptimizeKindCount];

  // The maximal combined height of all frames produced upon deoptimization, and
  // the maximal number of pushed arguments for function calls. Applied as an
  // offset to the first stack check of an optimized function.
  const size_t max_unoptimized_frame_height_;
  const size_t max_pushed_argument_count_;

  // kArchCallCFunction could be reached either:
  //   kArchCallCFunction;
  // or:
  //   kArchSaveCallerRegisters;
  //   kArchCallCFunction;
  //   kArchRestoreCallerRegisters;
  // The boolean is used to distinguish the two cases. In the latter case, we
  // also need to decide if FP registers need to be saved, which is controlled
  // by fp_mode_.
  bool caller_registers_saved_;
  SaveFPRegsMode fp_mode_;

  JumpTable* jump_tables_;
  OutOfLineCode* ools_;
  base::Optional<OsrHelper> osr_helper_;
  int osr_pc_offset_;
  int optimized_out_literal_id_;
  SourcePositionTableBuilder source_position_table_builder_;
  ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
  CodeGenResult result_;
  PoisoningMitigationLevel poisoning_level_;
  ZoneVector<int> block_starts_;
  TurbolizerCodeOffsetsInfo offsets_info_;
  ZoneVector<TurbolizerInstructionStartInfo> instr_starts_;

  const char* debug_name_ = nullptr;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_CODE_GENERATOR_H_