// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_CODE_GENERATOR_H_
#define V8_COMPILER_BACKEND_CODE_GENERATOR_H_

#include <memory>

#include "src/base/optional.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/safepoint-table.h"
#include "src/codegen/source-position-table.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/unwinding-info-writer.h"
#include "src/compiler/osr.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/objects/code-kind.h"
#include "src/trap-handler/trap-handler.h"

namespace v8 {
namespace internal {

namespace compiler {

// Forward declarations.
class DeoptimizationExit;
class FrameAccessState;
class Linkage;
class OutOfLineCode;

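// Branch targets and condition for an assembled branch. {fallthru} marks a
// branch whose false target is the next block in assembly order, so no jump
// needs to be emitted for it.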
struct BranchInfo {
  FlagsCondition condition;
  Label* true_label;
  Label* false_label;
  bool fallthru;
};

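// Helper for iterating over the input operands of an instruction: Advance()
// returns the operand at the current position and steps to the next one.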
class InstructionOperandIterator {
 public:
  InstructionOperandIterator(Instruction* instr, size_t pos)
      : instr_(instr), pos_(pos) {}

  Instruction* instruction() const { return instr_; }
  InstructionOperand* Advance() { return instr_->InputAt(pos_++); }

 private:
  Instruction* instr_;
  size_t pos_;
};

enum class DeoptimizationLiteralKind { kObject, kNumber, kString, kInvalid };

// Either a non-null Handle<Object>, a double or a StringConstantBase.
class DeoptimizationLiteral {
 public:
  DeoptimizationLiteral()
      : kind_(DeoptimizationLiteralKind::kInvalid),
        object_(),
        number_(0),
        string_(nullptr) {}
  explicit DeoptimizationLiteral(Handle<Object> object)
      : kind_(DeoptimizationLiteralKind::kObject), object_(object) {
    CHECK(!object_.is_null());
  }
  explicit DeoptimizationLiteral(double number)
      : kind_(DeoptimizationLiteralKind::kNumber), number_(number) {}
  explicit DeoptimizationLiteral(const StringConstantBase* string)
      : kind_(DeoptimizationLiteralKind::kString), string_(string) {}

  Handle<Object> object() const { return object_; }
  const StringConstantBase* string() const { return string_; }

  bool operator==(const DeoptimizationLiteral& other) const {
    return kind_ == other.kind_ && object_.equals(other.object_) &&
           bit_cast<uint64_t>(number_) == bit_cast<uint64_t>(other.number_) &&
           bit_cast<intptr_t>(string_) == bit_cast<intptr_t>(other.string_);
  }

  Handle<Object> Reify(Isolate* isolate) const;

  void Validate() const {
    CHECK_NE(kind_, DeoptimizationLiteralKind::kInvalid);
  }

  DeoptimizationLiteralKind kind() const {
    Validate();
    return kind_;
  }

 private:
  DeoptimizationLiteralKind kind_;

  Handle<Object> object_;
  double number_ = 0;
  const StringConstantBase* string_ = nullptr;
};

// These structs hold pc offsets for generated instructions and are only used
// when tracing for turbolizer is enabled.
struct TurbolizerCodeOffsetsInfo {
  int code_start_register_check = -1;
  int deopt_check = -1;
  int blocks_start = -1;
  int out_of_line_code = -1;
  int deoptimization_exits = -1;
  int pools = -1;
  int jump_tables = -1;
};

struct TurbolizerInstructionStartInfo {
  int gap_pc_offset = -1;
  int arch_instr_pc_offset = -1;
  int condition_pc_offset = -1;
};

// Generates native code for a sequence of instructions.
class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
 public:
  explicit CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
                         InstructionSequence* instructions,
                         OptimizedCompilationInfo* info, Isolate* isolate,
                         base::Optional<OsrHelper> osr_helper,
                         int start_source_position,
                         JumpOptimizationInfo* jump_opt,
                         const AssemblerOptions& options, Builtin builtin,
                         size_t max_unoptimized_frame_height,
                         size_t max_pushed_argument_count,
                         const char* debug_name = nullptr);

  // Generate native code. After calling AssembleCode, call FinalizeCode to
  // produce the actual code object. If an error occurs during either phase,
  // FinalizeCode returns an empty MaybeHandle.
  void AssembleCode();  // Does not need to run on main thread.
  MaybeHandle<Code> FinalizeCode();

  base::OwnedVector<byte> GetSourcePositionTable();
  base::OwnedVector<byte> GetProtectedInstructionsData();

  InstructionSequence* instructions() const { return instructions_; }
  FrameAccessState* frame_access_state() const { return frame_access_state_; }
  const Frame* frame() const { return frame_access_state_->frame(); }
  Isolate* isolate() const { return isolate_; }
  Linkage* linkage() const { return linkage_; }

  Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }

  void AddProtectedInstructionLanding(uint32_t instr_offset,
                                      uint32_t landing_offset);

  bool wasm_runtime_exception_support() const;

  SourcePosition start_source_position() const {
    return start_source_position_;
  }

  void AssembleSourcePosition(Instruction* instr);
  void AssembleSourcePosition(SourcePosition source_position);

  // Record a safepoint with the given pointer map.
  void RecordSafepoint(ReferenceMap* references);

  Zone* zone() const { return zone_; }
  TurboAssembler* tasm() { return &tasm_; }
  SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; }
  size_t handler_table_offset() const { return handler_table_offset_; }

  const ZoneVector<int>& block_starts() const { return block_starts_; }
  const ZoneVector<TurbolizerInstructionStartInfo>& instr_starts() const {
    return instr_starts_;
  }

  const TurbolizerCodeOffsetsInfo& offsets_info() const {
    return offsets_info_;
  }

  static constexpr int kBinarySearchSwitchMinimalCases = 4;

  // Returns true if an offset should be applied to the given stack check.
  // There are two reasons that this could happen:
  // 1. The optimized frame is smaller than the corresponding deoptimized frames
  //    and an offset must be applied in order to be able to deopt safely.
  // 2. The current function pushes a large number of arguments to the stack.
  //    These are not accounted for by the initial frame setup.
  bool ShouldApplyOffsetToStackCheck(Instruction* instr, uint32_t* offset);
  uint32_t GetStackCheckOffset();

  CodeKind code_kind() const { return info_->code_kind(); }

 private:
  GapResolver* resolver() { return &resolver_; }
  SafepointTableBuilder* safepoints() { return &safepoints_; }
  OptimizedCompilationInfo* info() const { return info_; }
  OsrHelper* osr_helper() { return &(*osr_helper_); }

  // Create the FrameAccessState object. The Frame is immutable from here on.
  void CreateFrameAccessState(Frame* frame);

  // Architecture-specific frame finalization.
  void FinishFrame(Frame* frame);

  // Checks if {block} will appear directly after {current_block_} when
  // assembling code, in which case, a fall-through can be used.
  bool IsNextInAssemblyOrder(RpoNumber block) const;

  // Check if a heap object can be materialized by loading from a heap root,
  // which is cheaper on some platforms than materializing the actual heap
  // object constant.
  bool IsMaterializableFromRoot(Handle<HeapObject> object,
                                RootIndex* index_return);

  enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };

  // Assemble instructions for the specified block.
  CodeGenResult AssembleBlock(const InstructionBlock* block);

  // Assemble code for the specified instruction.
  CodeGenResult AssembleInstruction(int instruction_index,
                                    const InstructionBlock* block);
  void AssembleGaps(Instruction* instr);

  // Computes branch info from the given instruction. Returns a valid rpo
  // number if the branch is redundant; the returned rpo number points to the
  // target basic block.
  RpoNumber ComputeBranchInfo(BranchInfo* branch, Instruction* instr);

  // Returns true if an instruction is a tail call that needs to adjust the
  // stack pointer before execution. The stack slot index of the empty slot
  // above the adjusted stack pointer is returned in |slot|.
  bool GetSlotAboveSPBeforeTailCall(Instruction* instr, int* slot);

  // Determines how to call helper stubs depending on the code kind.
  StubCallMode DetermineStubCallMode() const;

  CodeGenResult AssembleDeoptimizerCall(DeoptimizationExit* exit);

  // ===========================================================================
  // ============= Architecture-specific code generation methods. =============
  // ===========================================================================

  CodeGenResult AssembleArchInstruction(Instruction* instr);
  void AssembleArchJump(RpoNumber target);
  void AssembleArchJumpRegardlessOfAssemblyOrder(RpoNumber target);
  void AssembleArchBranch(Instruction* instr, BranchInfo* branch);

  // Generates special branch for deoptimization condition.
  void AssembleArchDeoptBranch(Instruction* instr, BranchInfo* branch);

  void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
  void AssembleArchSelect(Instruction* instr, FlagsCondition condition);
#if V8_ENABLE_WEBASSEMBLY
  void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
#endif  // V8_ENABLE_WEBASSEMBLY
  void AssembleArchBinarySearchSwitchRange(Register input, RpoNumber def_block,
                                           std::pair<int32_t, Label*>* begin,
                                           std::pair<int32_t, Label*>* end);
  void AssembleArchBinarySearchSwitch(Instruction* instr);
  void AssembleArchTableSwitch(Instruction* instr);

  // Generates code that checks whether the {kJavaScriptCallCodeStartRegister}
  // contains the expected pointer to the start of the instruction stream.
  void AssembleCodeStartRegisterCheck();

  // When entering code that is marked for deoptimization, rather than
  // continuing with its execution, we jump to lazily compiled code. We need to
  // do this because this code has already been deoptimized and needs to be
  // unlinked from the JS functions referring to it.
  void BailoutIfDeoptimized();

  // Generates an architecture-specific, descriptor-specific prologue
  // to set up a stack frame.
  void AssembleConstructFrame();

  // Generates an architecture-specific, descriptor-specific return sequence
  // to tear down a stack frame.
  void AssembleReturn(InstructionOperand* pop);

  void AssembleDeconstructFrame();

  // Generates code to manipulate the stack in preparation for a tail call.
  void AssemblePrepareTailCall();

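  // Classifies which kinds of move sources can be turned into push
  // instructions (see IsValidPush and GetPushCompatibleMoves below).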
  enum PushTypeFlag {
    kImmediatePush = 0x1,
    kRegisterPush = 0x2,
    kStackSlotPush = 0x4,
    kScalarPush = kRegisterPush | kStackSlotPush
  };

  using PushTypeFlags = base::Flags<PushTypeFlag>;

  static bool IsValidPush(InstructionOperand source, PushTypeFlags push_type);

  // Generate a list of moves from an instruction that are candidates to be
  // turned into push instructions on platforms that support them. In general,
  // the push candidates are moves to a set of contiguous destination
  // InstructionOperand locations on the stack that don't clobber values that
  // are needed to resolve the gap or use values generated by the gap, i.e.
  // moves that can be hoisted together before the actual gap and assembled
  // together.
  static void GetPushCompatibleMoves(Instruction* instr,
                                     PushTypeFlags push_type,
                                     ZoneVector<MoveOperands*>* pushes);

  class MoveType {
   public:
    enum Type {
      kRegisterToRegister,
      kRegisterToStack,
      kStackToRegister,
      kStackToStack,
      kConstantToRegister,
      kConstantToStack
    };

    // Detect what type of move or swap needs to be performed. Note that these
    // functions do not take into account the representation (Tagged, FP,
    // ...etc).

    static Type InferMove(InstructionOperand* source,
                          InstructionOperand* destination);
    static Type InferSwap(InstructionOperand* source,
                          InstructionOperand* destination);
  };

  // Called before a tail call |instr|'s gap moves are assembled and allows
  // gap-specific pre-processing, e.g. adjustment of the sp for tail calls that
  // need it before gap moves or conversion of certain gap moves into pushes.
  void AssembleTailCallBeforeGap(Instruction* instr,
                                 int first_unused_stack_slot);
  // Called after a tail call |instr|'s gap moves are assembled and allows
  // gap-specific post-processing, e.g. adjustment of the sp for tail calls that
  // need it after gap moves.
  void AssembleTailCallAfterGap(Instruction* instr,
                                int first_unused_stack_slot);

  void FinishCode();
  void MaybeEmitOutOfLineConstantPool();

  void IncrementStackAccessCounter(InstructionOperand* source,
                                   InstructionOperand* destination);

  // ===========================================================================
  // ============== Architecture-specific gap resolver methods. ===============
  // ===========================================================================

  // Interface used by the gap resolver to emit moves and swaps.
  void AssembleMove(InstructionOperand* source,
                    InstructionOperand* destination) final;
  void AssembleSwap(InstructionOperand* source,
                    InstructionOperand* destination) final;

  // ===========================================================================
  // =================== Jump table construction methods. =====================
  // ===========================================================================

  class JumpTable;
  // Adds a jump table that is emitted after the actual code. Returns a label
  // pointing to the beginning of the table. {targets} is assumed to be static
  // or zone allocated.
  Label* AddJumpTable(Label** targets, size_t target_count);
  // Emits a jump table.
  void AssembleJumpTable(Label** targets, size_t target_count);

  // ===========================================================================
  // ================== Deoptimization table construction. ====================
  // ===========================================================================

  void RecordCallPosition(Instruction* instr);
  Handle<DeoptimizationData> GenerateDeoptimizationData();
  int DefineDeoptimizationLiteral(DeoptimizationLiteral literal);
  DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
                                                    size_t frame_state_offset);
  DeoptimizationExit* BuildTranslation(Instruction* instr, int pc_offset,
                                       size_t frame_state_offset,
                                       size_t immediate_args_count,
                                       OutputFrameStateCombine state_combine);
  void BuildTranslationForFrameStateDescriptor(
      FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
      OutputFrameStateCombine state_combine);
  void TranslateStateValueDescriptor(StateValueDescriptor* desc,
                                     StateValueList* nested,
                                     InstructionOperandIterator* iter);
  void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
                                             InstructionOperandIterator* iter);
  void AddTranslationForOperand(Instruction* instr, InstructionOperand* op,
                                MachineType type);
  void MarkLazyDeoptSite();

  void PrepareForDeoptimizationExits(ZoneDeque<DeoptimizationExit*>* exits);
  DeoptimizationExit* AddDeoptimizationExit(Instruction* instr,
                                            size_t frame_state_offset,
                                            size_t immediate_args_count);

  // ===========================================================================

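  // Exception handler recorded for a call position: the handler's label and
  // the pc offset at the call site.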
  struct HandlerInfo {
    Label* handler;
    int pc_offset;
  };

  friend class OutOfLineCode;
  friend class CodeGeneratorTester;

  Zone* zone_;
  Isolate* isolate_;
  FrameAccessState* frame_access_state_;
  Linkage* const linkage_;
  InstructionSequence* const instructions_;
  UnwindingInfoWriter unwinding_info_writer_;
  OptimizedCompilationInfo* const info_;
  Label* const labels_;
  Label return_label_;
  RpoNumber current_block_;
  SourcePosition start_source_position_;
  SourcePosition current_source_position_;
  TurboAssembler tasm_;
  GapResolver resolver_;
  SafepointTableBuilder safepoints_;
  ZoneVector<HandlerInfo> handlers_;
  int next_deoptimization_id_ = 0;
  int deopt_exit_start_offset_ = 0;
  int eager_deopt_count_ = 0;
  int lazy_deopt_count_ = 0;
  ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
  ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
  size_t inlined_function_count_ = 0;
  TranslationArrayBuilder translations_;
  int handler_table_offset_ = 0;
  int last_lazy_deopt_pc_ = 0;

  // Deoptimization exits must be as small as possible, since their count grows
  // with function size. {jump_deoptimization_entry_labels_} is an optimization
  // to that effect, which extracts the (potentially large) instruction
  // sequence for the final jump to the deoptimization entry into a single spot
  // per Code object. All deopt exits can then near-call to this label. Note:
  // not used on all architectures.
  Label jump_deoptimization_entry_labels_[kDeoptimizeKindCount];

  // The maximal combined height of all frames produced upon deoptimization,
  // and the maximal number of pushed arguments for function calls. Applied as
  // an offset to the first stack check of an optimized function.
  const size_t max_unoptimized_frame_height_;
  const size_t max_pushed_argument_count_;

  // kArchCallCFunction could be reached either:
  //   kArchCallCFunction;
  // or:
  //   kArchSaveCallerRegisters;
  //   kArchCallCFunction;
  //   kArchRestoreCallerRegisters;
  // The boolean is used to distinguish the two cases. In the latter case, we
  // also need to decide if FP registers need to be saved, which is controlled
  // by fp_mode_.
  bool caller_registers_saved_;
  SaveFPRegsMode fp_mode_;

  JumpTable* jump_tables_;
  OutOfLineCode* ools_;
  base::Optional<OsrHelper> osr_helper_;
  int osr_pc_offset_;
  int optimized_out_literal_id_;
  SourcePositionTableBuilder source_position_table_builder_;
  ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
  CodeGenResult result_;
  ZoneVector<int> block_starts_;
  TurbolizerCodeOffsetsInfo offsets_info_;
  ZoneVector<TurbolizerInstructionStartInfo> instr_starts_;

  const char* debug_name_ = nullptr;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_CODE_GENERATOR_H_