• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
#define V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_

#include "src/ast/scopes.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
#include "src/crankshaft/mips64/lithium-mips64.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
23 class LCodeGen: public LCodeGenBase {
24  public:
LCodeGen(LChunk * chunk,MacroAssembler * assembler,CompilationInfo * info)25   LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
26       : LCodeGenBase(chunk, assembler, info),
27         jump_table_(4, info->zone()),
28         scope_(info->scope()),
29         deferred_(8, info->zone()),
30         frame_is_built_(false),
31         safepoints_(info->zone()),
32         resolver_(this),
33         expected_safepoint_kind_(Safepoint::kSimple) {
34     PopulateDeoptimizationLiteralsWithInlinedFunctions();
35   }
36 
37 
LookupDestination(int block_id)38   int LookupDestination(int block_id) const {
39     return chunk()->LookupDestination(block_id);
40   }
41 
IsNextEmittedBlock(int block_id)42   bool IsNextEmittedBlock(int block_id) const {
43     return LookupDestination(block_id) == GetNextEmittedBlock();
44   }
45 
NeedsEagerFrame()46   bool NeedsEagerFrame() const {
47     return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
48            !info()->IsStub() || info()->requires_frame();
49   }
NeedsDeferredFrame()50   bool NeedsDeferredFrame() const {
51     return !NeedsEagerFrame() && info()->is_deferred_calling();
52   }
53 
GetRAState()54   RAStatus GetRAState() const {
55     return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
56   }
57 
58   // Support for converting LOperands to assembler types.
59   // LOperand must be a register.
60   Register ToRegister(LOperand* op) const;
61 
62   // LOperand is loaded into scratch, unless already a register.
63   Register EmitLoadRegister(LOperand* op, Register scratch);
64 
65   // LOperand must be a double register.
66   DoubleRegister ToDoubleRegister(LOperand* op) const;
67 
68   // LOperand is loaded into dbl_scratch, unless already a double register.
69   DoubleRegister EmitLoadDoubleRegister(LOperand* op,
70                                         FloatRegister flt_scratch,
71                                         DoubleRegister dbl_scratch);
72   int64_t ToRepresentation_donotuse(LConstantOperand* op,
73                                     const Representation& r) const;
74   int32_t ToInteger32(LConstantOperand* op) const;
75   Smi* ToSmi(LConstantOperand* op) const;
76   double ToDouble(LConstantOperand* op) const;
77   Operand ToOperand(LOperand* op);
78   MemOperand ToMemOperand(LOperand* op) const;
79   // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
80   MemOperand ToHighMemOperand(LOperand* op) const;
81 
82   bool IsInteger32(LConstantOperand* op) const;
83   bool IsSmi(LConstantOperand* op) const;
84   Handle<Object> ToHandle(LConstantOperand* op) const;
85 
86   // Try to generate code for the entire chunk, but it may fail if the
87   // chunk contains constructs we cannot handle. Returns true if the
88   // code generation attempt succeeded.
89   bool GenerateCode();
90 
91   // Finish the code by setting stack height, safepoint, and bailout
92   // information on it.
93   void FinishCode(Handle<Code> code);
94 
95   void DoDeferredNumberTagD(LNumberTagD* instr);
96 
97   enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
98   void DoDeferredNumberTagIU(LInstruction* instr,
99                              LOperand* value,
100                              LOperand* temp1,
101                              LOperand* temp2,
102                              IntegerSignedness signedness);
103 
104   void DoDeferredTaggedToI(LTaggedToI* instr);
105   void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
106   void DoDeferredStackCheck(LStackCheck* instr);
107   void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
108   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
109   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
110   void DoDeferredAllocate(LAllocate* instr);
111 
112   void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
113   void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
114                                    Register result,
115                                    Register object,
116                                    Register index);
117 
118   // Parallel move support.
119   void DoParallelMove(LParallelMove* move);
120   void DoGap(LGap* instr);
121 
122   MemOperand PrepareKeyedOperand(Register key,
123                                  Register base,
124                                  bool key_is_constant,
125                                  int constant_key,
126                                  int element_size,
127                                  int shift_size,
128                                  int base_offset);
129 
130   // Emit frame translation commands for an environment.
131   void WriteTranslation(LEnvironment* environment, Translation* translation);
132 
133   // Declare methods that deal with the individual node types.
134 #define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)135   LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
136 #undef DECLARE_DO
137 
138  private:
139   Scope* scope() const { return scope_; }
140 
scratch0()141   Register scratch0() { return kLithiumScratchReg; }
scratch1()142   Register scratch1() { return kLithiumScratchReg2; }
double_scratch0()143   DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
144 
145   LInstruction* GetNextInstruction();
146 
147   void EmitClassOfTest(Label* if_true,
148                        Label* if_false,
149                        Handle<String> class_name,
150                        Register input,
151                        Register temporary,
152                        Register temporary2);
153 
HasAllocatedStackSlots()154   bool HasAllocatedStackSlots() const {
155     return chunk()->HasAllocatedStackSlots();
156   }
GetStackSlotCount()157   int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
GetTotalFrameSlotCount()158   int GetTotalFrameSlotCount() const {
159     return chunk()->GetTotalFrameSlotCount();
160   }
161 
AddDeferredCode(LDeferredCode * code)162   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
163 
164   void SaveCallerDoubles();
165   void RestoreCallerDoubles();
166 
167   // Code generation passes.  Returns true if code generation should
168   // continue.
169   void GenerateBodyInstructionPre(LInstruction* instr) override;
170   bool GeneratePrologue();
171   bool GenerateDeferredCode();
172   bool GenerateJumpTable();
173   bool GenerateSafepointTable();
174 
175   // Generates the custom OSR entrypoint and sets the osr_pc_offset.
176   void GenerateOsrPrologue();
177 
178   enum SafepointMode {
179     RECORD_SIMPLE_SAFEPOINT,
180     RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
181   };
182 
183   void CallCode(Handle<Code> code,
184                 RelocInfo::Mode mode,
185                 LInstruction* instr);
186 
187   void CallCodeGeneric(Handle<Code> code,
188                        RelocInfo::Mode mode,
189                        LInstruction* instr,
190                        SafepointMode safepoint_mode);
191 
192   void CallRuntime(const Runtime::Function* function,
193                    int num_arguments,
194                    LInstruction* instr,
195                    SaveFPRegsMode save_doubles = kDontSaveFPRegs);
196 
CallRuntime(Runtime::FunctionId id,int num_arguments,LInstruction * instr)197   void CallRuntime(Runtime::FunctionId id,
198                    int num_arguments,
199                    LInstruction* instr) {
200     const Runtime::Function* function = Runtime::FunctionForId(id);
201     CallRuntime(function, num_arguments, instr);
202   }
203 
CallRuntime(Runtime::FunctionId id,LInstruction * instr)204   void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
205     const Runtime::Function* function = Runtime::FunctionForId(id);
206     CallRuntime(function, function->nargs, instr);
207   }
208 
209   void LoadContextFromDeferred(LOperand* context);
210   void CallRuntimeFromDeferred(Runtime::FunctionId id,
211                                int argc,
212                                LInstruction* instr,
213                                LOperand* context);
214 
215   void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
216                           Register scratch2, Register scratch3);
217 
218   // Generate a direct call to a known function.  Expects the function
219   // to be in a1.
220   void CallKnownFunction(Handle<JSFunction> function,
221                          int formal_parameter_count, int arity,
222                          bool is_tail_call, LInstruction* instr);
223 
224   void RecordSafepointWithLazyDeopt(LInstruction* instr,
225                                     SafepointMode safepoint_mode);
226 
227   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
228                                             Safepoint::DeoptMode mode);
229   void DeoptimizeIf(Condition condition, LInstruction* instr,
230                     DeoptimizeReason deopt_reason,
231                     Deoptimizer::BailoutType bailout_type,
232                     Register src1 = zero_reg,
233                     const Operand& src2 = Operand(zero_reg));
234   void DeoptimizeIf(Condition condition, LInstruction* instr,
235                     DeoptimizeReason deopt_reason = DeoptimizeReason::kNoReason,
236                     Register src1 = zero_reg,
237                     const Operand& src2 = Operand(zero_reg));
238 
239   void AddToTranslation(LEnvironment* environment,
240                         Translation* translation,
241                         LOperand* op,
242                         bool is_tagged,
243                         bool is_uint32,
244                         int* object_index_pointer,
245                         int* dematerialized_index_pointer);
246 
247   Register ToRegister(int index) const;
248   DoubleRegister ToDoubleRegister(int index) const;
249 
250   MemOperand BuildSeqStringOperand(Register string,
251                                    LOperand* index,
252                                    String::Encoding encoding);
253 
254   void EmitIntegerMathAbs(LMathAbs* instr);
255   void EmitSmiMathAbs(LMathAbs* instr);
256 
257   // Support for recording safepoint information.
258   void RecordSafepoint(LPointerMap* pointers,
259                        Safepoint::Kind kind,
260                        int arguments,
261                        Safepoint::DeoptMode mode);
262   void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
263   void RecordSafepoint(Safepoint::DeoptMode mode);
264   void RecordSafepointWithRegisters(LPointerMap* pointers,
265                                     int arguments,
266                                     Safepoint::DeoptMode mode);
267 
268   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
269   void EmitGoto(int block);
270 
271   // EmitBranch expects to be the last instruction of a block.
272   template<class InstrType>
273   void EmitBranch(InstrType instr,
274                   Condition condition,
275                   Register src1,
276                   const Operand& src2);
277   template<class InstrType>
278   void EmitBranchF(InstrType instr,
279                    Condition condition,
280                    FPURegister src1,
281                    FPURegister src2);
282   template <class InstrType>
283   void EmitTrueBranch(InstrType instr, Condition condition, Register src1,
284                       const Operand& src2);
285   template <class InstrType>
286   void EmitFalseBranch(InstrType instr, Condition condition, Register src1,
287                        const Operand& src2);
288   template<class InstrType>
289   void EmitFalseBranchF(InstrType instr,
290                         Condition condition,
291                         FPURegister src1,
292                         FPURegister src2);
293   void EmitCmpI(LOperand* left, LOperand* right);
294   void EmitNumberUntagD(LNumberUntagD* instr, Register input,
295                         DoubleRegister result, NumberUntagDMode mode);
296 
297   // Emits optimized code for typeof x == "y".  Modifies input register.
298   // Returns the condition on which a final split to
299   // true and false label should be made, to optimize fallthrough.
300   // Returns two registers in cmp1 and cmp2 that can be used in the
301   // Branch instruction after EmitTypeofIs.
302   Condition EmitTypeofIs(Label* true_label,
303                          Label* false_label,
304                          Register input,
305                          Handle<String> type_name,
306                          Register* cmp1,
307                          Operand* cmp2);
308 
309   // Emits optimized code for %_IsString(x).  Preserves input register.
310   // Returns the condition on which a final split to
311   // true and false label should be made, to optimize fallthrough.
312   Condition EmitIsString(Register input,
313                          Register temp1,
314                          Label* is_not_string,
315                          SmiCheck check_needed);
316 
317   // Emits optimized code to deep-copy the contents of statically known
318   // object graphs (e.g. object literal boilerplate).
319   void EmitDeepCopy(Handle<JSObject> object,
320                     Register result,
321                     Register source,
322                     int* offset,
323                     AllocationSiteMode mode);
324   // Emit optimized code for integer division.
325   // Inputs are signed.
326   // All registers are clobbered.
327   // If 'remainder' is no_reg, it is not computed.
328   void EmitSignedIntegerDivisionByConstant(Register result,
329                                            Register dividend,
330                                            int32_t divisor,
331                                            Register remainder,
332                                            Register scratch,
333                                            LEnvironment* environment);
334 
335 
336   void EnsureSpaceForLazyDeopt(int space_needed) override;
337   void DoLoadKeyedExternalArray(LLoadKeyed* instr);
338   void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
339   void DoLoadKeyedFixedArray(LLoadKeyed* instr);
340   void DoStoreKeyedExternalArray(LStoreKeyed* instr);
341   void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
342   void DoStoreKeyedFixedArray(LStoreKeyed* instr);
343 
344   template <class T>
345   void EmitVectorLoadICRegisters(T* instr);
346 
347   ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
348   Scope* const scope_;
349   ZoneList<LDeferredCode*> deferred_;
350   bool frame_is_built_;
351 
352   // Builder that keeps track of safepoints in the code. The table
353   // itself is emitted at the end of the generated code.
354   SafepointTableBuilder safepoints_;
355 
356   // Compiler from a set of parallel moves to a sequential list of moves.
357   LGapResolver resolver_;
358 
359   Safepoint::Kind expected_safepoint_kind_;
360 
361   class PushSafepointRegistersScope final BASE_EMBEDDED {
362    public:
363     explicit PushSafepointRegistersScope(LCodeGen* codegen);
364 
365     ~PushSafepointRegistersScope();
366 
367    private:
368     LCodeGen* codegen_;
369   };
370 
371   friend class LDeferredCode;
372   friend class LEnvironment;
373   friend class SafepointGenerator;
374   DISALLOW_COPY_AND_ASSIGN(LCodeGen);
375 };
376 
377 
378 class LDeferredCode : public ZoneObject {
379  public:
LDeferredCode(LCodeGen * codegen)380   explicit LDeferredCode(LCodeGen* codegen)
381       : codegen_(codegen),
382         external_exit_(NULL),
383         instruction_index_(codegen->current_instruction_) {
384     codegen->AddDeferredCode(this);
385   }
386 
~LDeferredCode()387   virtual ~LDeferredCode() {}
388   virtual void Generate() = 0;
389   virtual LInstruction* instr() = 0;
390 
SetExit(Label * exit)391   void SetExit(Label* exit) { external_exit_ = exit; }
entry()392   Label* entry() { return &entry_; }
exit()393   Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
instruction_index()394   int instruction_index() const { return instruction_index_; }
395 
396  protected:
codegen()397   LCodeGen* codegen() const { return codegen_; }
masm()398   MacroAssembler* masm() const { return codegen_->masm(); }
399 
400  private:
401   LCodeGen* codegen_;
402   Label entry_;
403   Label exit_;
404   Label* external_exit_;
405   int instruction_index_;
406 };
407 
}  // namespace internal
}  // namespace v8

#endif  // V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
412