// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/address-map.h"
#include "src/base/adapters.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
#include "src/frames-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

class CodeGenerator::JumpTable final : public ZoneObject {
 public:
  JumpTable(JumpTable* next, Label** targets, size_t target_count)
      : next_(next), targets_(targets), target_count_(target_count) {}

  Label* label() { return &label_; }
  JumpTable* next() const { return next_; }
  Label** targets() const { return targets_; }
  size_t target_count() const { return target_count_; }

 private:
  Label label_;
  JumpTable* const next_;
  Label** const targets_;
  size_t const target_count_;
};

CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
                             InstructionSequence* code, CompilationInfo* info)
    : frame_access_state_(nullptr),
      linkage_(linkage),
      code_(code),
      unwinding_info_writer_(zone()),
      info_(info),
      labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
      current_block_(RpoNumber::Invalid()),
      current_source_position_(SourcePosition::Unknown()),
      masm_(info->isolate(), nullptr, 0, CodeObjectRequired::kNo),
      resolver_(this),
      safepoints_(code->zone()),
      handlers_(code->zone()),
      deoptimization_exits_(code->zone()),
      deoptimization_states_(code->zone()),
      deoptimization_literals_(code->zone()),
      inlined_function_count_(0),
      translations_(code->zone()),
      last_lazy_deopt_pc_(0),
      jump_tables_(nullptr),
      ools_(nullptr),
      osr_pc_offset_(-1),
      source_position_table_builder_(code->zone(),
                                     info->SourcePositionRecordingMode()) {
  for (int i = 0; i < code->InstructionBlockCount(); ++i) {
    new (&labels_[i]) Label;
  }
  CreateFrameAccessState(frame);
}

Isolate* CodeGenerator::isolate() const { return info_->isolate(); }

void CodeGenerator::CreateFrameAccessState(Frame* frame) {
  FinishFrame(frame);
  frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
}

Handle<Code> CodeGenerator::GenerateCode() {
  CompilationInfo* info = this->info();

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in AssembleConstructFrame).
  FrameScope frame_scope(masm(), StackFrame::MANUAL);

  // Place function entry hook if requested to do so.
  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm());
  }
  // Architecture-specific, linkage-specific prologue.
  info->set_prologue_offset(masm()->pc_offset());

  // Define deoptimization literals for all inlined functions.
  DCHECK_EQ(0u, deoptimization_literals_.size());
  for (CompilationInfo::InlinedFunctionHolder& inlined :
       info->inlined_functions()) {
    if (!inlined.shared_info.is_identical_to(info->shared_info())) {
      int index = DefineDeoptimizationLiteral(inlined.shared_info);
      inlined.RegisterInlinedFunctionId(index);
    }
  }
  inlined_function_count_ = deoptimization_literals_.size();

  // Define deoptimization literals for all unoptimized code objects of inlined
  // functions. This ensures unoptimized code is kept alive by optimized code.
  for (const CompilationInfo::InlinedFunctionHolder& inlined :
       info->inlined_functions()) {
    if (!inlined.shared_info.is_identical_to(info->shared_info())) {
      DefineDeoptimizationLiteral(inlined.inlined_code_object_root);
    }
  }

  unwinding_info_writer_.SetNumberOfInstructionBlocks(
      code()->InstructionBlockCount());

  // Assemble all non-deferred blocks, followed by deferred ones.
  for (int deferred = 0; deferred < 2; ++deferred) {
    for (const InstructionBlock* block : code()->instruction_blocks()) {
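      // On the first pass (deferred == 0) this skips the deferred blocks, on
      // the second pass the non-deferred ones, so deferred code ends up last.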
      if (block->IsDeferred() == (deferred == 0)) {
        continue;
      }
      // Align loop headers on 16-byte boundaries.
      if (block->IsLoopHeader()) masm()->Align(16);
      // Ensure lazy deopt doesn't patch handler entry points.
      if (block->IsHandler()) EnsureSpaceForLazyDeopt();
      // Bind a label for a block.
      current_block_ = block->rpo_number();
      unwinding_info_writer_.BeginInstructionBlock(masm()->pc_offset(), block);
      if (FLAG_code_comments) {
        // TODO(titzer): these code comments are a giant memory leak.
        Vector<char> buffer = Vector<char>::New(200);
        char* buffer_start = buffer.start();

        int next = SNPrintF(
            buffer, "-- B%d start%s%s%s%s", block->rpo_number().ToInt(),
            block->IsDeferred() ? " (deferred)" : "",
            block->needs_frame() ? "" : " (no frame)",
            block->must_construct_frame() ? " (construct frame)" : "",
            block->must_deconstruct_frame() ? " (deconstruct frame)" : "");

        buffer = buffer.SubVector(next, buffer.length());

        if (block->IsLoopHeader()) {
          next =
              SNPrintF(buffer, " (loop up to %d)", block->loop_end().ToInt());
          buffer = buffer.SubVector(next, buffer.length());
        }
        if (block->loop_header().IsValid()) {
          next =
              SNPrintF(buffer, " (in loop %d)", block->loop_header().ToInt());
          buffer = buffer.SubVector(next, buffer.length());
        }
        SNPrintF(buffer, " --");
        masm()->RecordComment(buffer_start);
      }

      frame_access_state()->MarkHasFrame(block->needs_frame());

      masm()->bind(GetLabel(current_block_));
      if (block->must_construct_frame()) {
        AssembleConstructFrame();
        // We need to set up the root register after we assemble the prologue,
        // to avoid clobbering callee-saved registers in case of C linkage and
        // using the roots.
        // TODO(mtrofin): investigate how we can avoid doing this repeatedly.
        if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
          masm()->InitializeRootRegister();
        }
      }

      CodeGenResult result;
      if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
        ConstantPoolUnavailableScope constant_pool_unavailable(masm());
        result = AssembleBlock(block);
      } else {
        result = AssembleBlock(block);
      }
      if (result != kSuccess) return Handle<Code>();
      unwinding_info_writer_.EndInstructionBlock(block);
    }
  }

  // Assemble all out-of-line code.
  if (ools_) {
    masm()->RecordComment("-- Out of line code --");
    for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
      masm()->bind(ool->entry());
      ool->Generate();
      if (ool->exit()->is_bound()) masm()->jmp(ool->exit());
    }
  }

  // Assemble all eager deoptimization exits.
  for (DeoptimizationExit* exit : deoptimization_exits_) {
    masm()->bind(exit->label());
    AssembleDeoptimizerCall(exit->deoptimization_id(), Deoptimizer::EAGER,
                            exit->pos());
  }

  // Ensure there is space for lazy deoptimization in the code.
  if (info->ShouldEnsureSpaceForLazyDeopt()) {
    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    while (masm()->pc_offset() < target_offset) {
      masm()->nop();
    }
  }

  FinishCode(masm());

  // Emit the jump tables.
  if (jump_tables_) {
    masm()->Align(kPointerSize);
    for (JumpTable* table = jump_tables_; table; table = table->next()) {
      masm()->bind(table->label());
      AssembleJumpTable(table->targets(), table->target_count());
    }
  }

  safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());

  unwinding_info_writer_.Finish(masm()->pc_offset());

  Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
      masm(), unwinding_info_writer_.eh_frame_writer(), info, Handle<Object>());
  result->set_is_turbofanned(true);
  result->set_stack_slots(frame()->GetTotalFrameSlotCount());
  result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
  Handle<ByteArray> source_positions =
      source_position_table_builder_.ToSourcePositionTable(
          isolate(), Handle<AbstractCode>::cast(result));
  result->set_source_position_table(*source_positions);

  // Emit exception handler table.
  if (!handlers_.empty()) {
    Handle<HandlerTable> table =
        Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
            HandlerTable::LengthForReturn(static_cast<int>(handlers_.size())),
            TENURED));
    for (size_t i = 0; i < handlers_.size(); ++i) {
      table->SetReturnOffset(static_cast<int>(i), handlers_[i].pc_offset);
      table->SetReturnHandler(static_cast<int>(i), handlers_[i].handler->pos());
    }
    result->set_handler_table(*table);
  }

  PopulateDeoptimizationData(result);

  // Ensure there is space for lazy deoptimization in the relocation info.
  if (info->ShouldEnsureSpaceForLazyDeopt()) {
    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(result);
  }

  return result;
}


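// Returns true if {block} directly follows the block currently being
// assembled in the assembly order, so a jump to it can fall through.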
bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
  return code()
      ->InstructionBlockAt(current_block_)
      ->ao_number()
      .IsNext(code()->InstructionBlockAt(block)->ao_number());
}


void CodeGenerator::RecordSafepoint(ReferenceMap* references,
                                    Safepoint::Kind kind, int arguments,
                                    Safepoint::DeoptMode deopt_mode) {
  Safepoint safepoint =
      safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
  int stackSlotToSpillSlotDelta =
      frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
  for (const InstructionOperand& operand : references->reference_operands()) {
    if (operand.IsStackSlot()) {
      int index = LocationOperand::cast(operand).index();
      DCHECK(index >= 0);
      // We might index values in the fixed part of the frame (i.e. the
      // closure pointer or the context pointer); these are not spill slots
      // and therefore don't work with the SafepointTable currently, but
      // we also don't need to worry about them, since the GC has special
      // knowledge about those fields anyway.
      if (index < stackSlotToSpillSlotDelta) continue;
      safepoint.DefinePointerSlot(index, zone());
    } else if (operand.IsRegister() && (kind & Safepoint::kWithRegisters)) {
      Register reg = LocationOperand::cast(operand).GetRegister();
      safepoint.DefinePointerRegister(reg, zone());
    }
  }
}

bool CodeGenerator::IsMaterializableFromRoot(
    Handle<HeapObject> object, Heap::RootListIndex* index_return) {
  const CallDescriptor* incoming_descriptor =
      linkage()->GetIncomingDescriptor();
  if (incoming_descriptor->flags() & CallDescriptor::kCanUseRoots) {
    RootIndexMap map(isolate());
    int root_index = map.Lookup(*object);
    if (root_index != RootIndexMap::kInvalidRootIndex) {
      *index_return = static_cast<Heap::RootListIndex>(root_index);
      return true;
    }
  }
  return false;
}

CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
    const InstructionBlock* block) {
  for (int i = block->code_start(); i < block->code_end(); ++i) {
    Instruction* instr = code()->InstructionAt(i);
    CodeGenResult result = AssembleInstruction(instr, block);
    if (result != kSuccess) return result;
  }
  return kSuccess;
}

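// Checks whether {source} can be materialized by a push of the kind allowed
// by the {push_type} mask.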
bool CodeGenerator::IsValidPush(InstructionOperand source,
                                CodeGenerator::PushTypeFlags push_type) {
  if (source.IsImmediate() &&
      ((push_type & CodeGenerator::kImmediatePush) != 0)) {
    return true;
  }
  if ((source.IsRegister() || source.IsStackSlot()) &&
      ((push_type & CodeGenerator::kScalarPush) != 0)) {
    return true;
  }
  if ((source.IsFloatRegister() || source.IsFloatStackSlot()) &&
      ((push_type & CodeGenerator::kFloat32Push) != 0)) {
    return true;
  }
  if ((source.IsDoubleRegister() || source.IsDoubleStackSlot()) &&
      ((push_type & CodeGenerator::kFloat64Push) != 0)) {
    return true;
  }
  return false;
}

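// Collects gap moves of {instr} whose destinations are push-compatible stack
// slots; {pushes} is cleared if the moves cannot safely be split out of the
// regular gap resolution.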
void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
                                           PushTypeFlags push_type,
                                           ZoneVector<MoveOperands*>* pushes) {
  pushes->clear();
  for (int i = Instruction::FIRST_GAP_POSITION;
       i <= Instruction::LAST_GAP_POSITION; ++i) {
    Instruction::GapPosition inner_pos =
        static_cast<Instruction::GapPosition>(i);
    ParallelMove* parallel_move = instr->GetParallelMove(inner_pos);
    if (parallel_move != nullptr) {
      for (auto move : *parallel_move) {
        InstructionOperand source = move->source();
        InstructionOperand destination = move->destination();
        int first_push_compatible_index =
            V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
        // If there are any moves from slots that will be overwritten by
        // pushes, then the full gap resolver must be used, since the push
        // optimization doesn't participate in the parallel move and might
        // clobber values needed for the gap resolve.
        if (source.IsStackSlot() &&
            LocationOperand::cast(source).index() >=
                first_push_compatible_index) {
          pushes->clear();
          return;
        }
        // TODO(danno): Right now, only consider moves from the FIRST gap for
        // pushes. Theoretically, we could extract pushes for both gaps (there
        // are cases where this happens), but the logic for that would also have
        // to check to make sure that non-memory inputs to the pushes from the
        // LAST gap don't get clobbered in the FIRST gap.
        if (i == Instruction::FIRST_GAP_POSITION) {
          if (destination.IsStackSlot() &&
              LocationOperand::cast(destination).index() >=
                  first_push_compatible_index) {
            int index = LocationOperand::cast(destination).index();
            if (IsValidPush(source, push_type)) {
              if (index >= static_cast<int>(pushes->size())) {
                pushes->resize(index + 1);
              }
              (*pushes)[index] = move;
            }
          }
        }
      }
    }
  }

  // For now, only support a set of contiguous pushes at the end of the list.
  size_t push_count_upper_bound = pushes->size();
  size_t push_begin = push_count_upper_bound;
  for (auto move : base::Reversed(*pushes)) {
    if (move == nullptr) break;
    push_begin--;
  }
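  // Keep only the trailing run of pushes found above, compacted to the front
  // of the vector.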
  size_t push_count = pushes->size() - push_begin;
  std::copy(pushes->begin() + push_begin,
            pushes->begin() + push_begin + push_count, pushes->begin());
  pushes->resize(push_count);
}

CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
    Instruction* instr, const InstructionBlock* block) {
  int first_unused_stack_slot;
  bool adjust_stack =
      GetSlotAboveSPBeforeTailCall(instr, &first_unused_stack_slot);
  if (adjust_stack) AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
  AssembleGaps(instr);
  if (adjust_stack) AssembleTailCallAfterGap(instr, first_unused_stack_slot);
  DCHECK_IMPLIES(
      block->must_deconstruct_frame(),
      instr != code()->InstructionAt(block->last_instruction_index()) ||
          instr->IsRet() || instr->IsJump());
  if (instr->IsJump() && block->must_deconstruct_frame()) {
    AssembleDeconstructFrame();
  }
  AssembleSourcePosition(instr);
  // Assemble architecture-specific code for the instruction.
  CodeGenResult result = AssembleArchInstruction(instr);
  if (result != kSuccess) return result;

  FlagsMode mode = FlagsModeField::decode(instr->opcode());
  FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
  switch (mode) {
    case kFlags_branch: {
      // Assemble a branch after this instruction.
      InstructionOperandConverter i(this, instr);
      RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
      RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);

      if (true_rpo == false_rpo) {
        // redundant branch.
        if (!IsNextInAssemblyOrder(true_rpo)) {
          AssembleArchJump(true_rpo);
        }
        return kSuccess;
      }
      if (IsNextInAssemblyOrder(true_rpo)) {
        // true block is next, can fall through if condition negated.
        std::swap(true_rpo, false_rpo);
        condition = NegateFlagsCondition(condition);
      }
      BranchInfo branch;
      branch.condition = condition;
      branch.true_label = GetLabel(true_rpo);
      branch.false_label = GetLabel(false_rpo);
      branch.fallthru = IsNextInAssemblyOrder(false_rpo);
      // Assemble architecture-specific branch.
      AssembleArchBranch(instr, &branch);
      break;
    }
    case kFlags_deoptimize: {
      // Assemble a conditional eager deoptimization after this instruction.
      InstructionOperandConverter i(this, instr);
      size_t frame_state_offset = MiscField::decode(instr->opcode());
      DeoptimizationExit* const exit =
          AddDeoptimizationExit(instr, frame_state_offset);
      Label continue_label;
      BranchInfo branch;
      branch.condition = condition;
      branch.true_label = exit->label();
      branch.false_label = &continue_label;
      branch.fallthru = true;
      // Assemble architecture-specific branch.
      AssembleArchBranch(instr, &branch);
      masm()->bind(&continue_label);
      break;
    }
    case kFlags_set: {
      // Assemble a boolean materialization after this instruction.
      AssembleArchBoolean(instr, condition);
      break;
    }
    case kFlags_none: {
      break;
    }
  }
  return kSuccess;
}


void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
  SourcePosition source_position = SourcePosition::Unknown();
  if (!code()->GetSourcePosition(instr, &source_position)) return;
  if (source_position == current_source_position_) return;
  current_source_position_ = source_position;
  if (!source_position.IsKnown()) return;
  source_position_table_builder_.AddPosition(masm()->pc_offset(),
                                             source_position, false);
  if (FLAG_code_comments) {
    CompilationInfo* info = this->info();
    if (!info->parse_info()) return;
    std::ostringstream buffer;
    buffer << "-- " << source_position.InliningStack(info) << " --";
    masm()->RecordComment(StrDup(buffer.str().c_str()));
  }
}

bool CodeGenerator::GetSlotAboveSPBeforeTailCall(Instruction* instr,
                                                 int* slot) {
  if (instr->IsTailCall()) {
    InstructionOperandConverter g(this, instr);
    *slot = g.InputInt32(instr->InputCount() - 1);
    return true;
  } else {
    return false;
  }
}

void CodeGenerator::AssembleGaps(Instruction* instr) {
  for (int i = Instruction::FIRST_GAP_POSITION;
       i <= Instruction::LAST_GAP_POSITION; i++) {
    Instruction::GapPosition inner_pos =
        static_cast<Instruction::GapPosition>(i);
    ParallelMove* move = instr->GetParallelMove(inner_pos);
    if (move != nullptr) resolver()->Resolve(move);
  }
}

namespace {

Handle<PodArray<InliningPosition>> CreateInliningPositions(
    CompilationInfo* info) {
  const CompilationInfo::InlinedFunctionList& inlined_functions =
      info->inlined_functions();
  if (inlined_functions.size() == 0) {
    return Handle<PodArray<InliningPosition>>::cast(
        info->isolate()->factory()->empty_byte_array());
  }
  Handle<PodArray<InliningPosition>> inl_positions =
      PodArray<InliningPosition>::New(
          info->isolate(), static_cast<int>(inlined_functions.size()), TENURED);
  for (size_t i = 0; i < inlined_functions.size(); ++i) {
    inl_positions->set(static_cast<int>(i), inlined_functions[i].position);
  }
  return inl_positions;
}

}  // namespace

void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
  CompilationInfo* info = this->info();
  int deopt_count = static_cast<int>(deoptimization_states_.size());
  if (deopt_count == 0 && !info->is_osr()) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), deopt_count, TENURED);

  Handle<ByteArray> translation_array =
      translations_.CreateByteArray(isolate()->factory());

  data->SetTranslationByteArray(*translation_array);
  data->SetInlinedFunctionCount(
      Smi::FromInt(static_cast<int>(inlined_function_count_)));
  data->SetOptimizationId(Smi::FromInt(info->optimization_id()));

  if (info->has_shared_info()) {
    data->SetSharedFunctionInfo(*info->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::kZero);
  }

  Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
      static_cast<int>(deoptimization_literals_.size()), TENURED);
  {
    AllowDeferredHandleDereference copy_handles;
    for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  Handle<PodArray<InliningPosition>> inl_pos = CreateInliningPositions(info);
  data->SetInliningPositions(*inl_pos);

  if (info->is_osr()) {
    DCHECK(osr_pc_offset_ >= 0);
    data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
    data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
  } else {
    BailoutId osr_ast_id = BailoutId::None();
    data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
    data->SetOsrPcOffset(Smi::FromInt(-1));
  }

  // Populate deoptimization entries.
  for (int i = 0; i < deopt_count; i++) {
    DeoptimizationState* deoptimization_state = deoptimization_states_[i];
    data->SetAstId(i, deoptimization_state->bailout_id());
    CHECK(deoptimization_states_[i]);
    data->SetTranslationIndex(
        i, Smi::FromInt(deoptimization_states_[i]->translation_id()));
    data->SetArgumentsStackHeight(i, Smi::kZero);
    data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset()));
  }

  code_object->set_deoptimization_data(*data);
}


Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
  jump_tables_ = new (zone()) JumpTable(jump_tables_, targets, target_count);
  return jump_tables_->label();
}


void CodeGenerator::RecordCallPosition(Instruction* instr) {
  CallDescriptor::Flags flags(MiscField::decode(instr->opcode()));

  bool needs_frame_state = (flags & CallDescriptor::kNeedsFrameState);

  RecordSafepoint(
      instr->reference_map(), Safepoint::kSimple, 0,
      needs_frame_state ? Safepoint::kLazyDeopt : Safepoint::kNoLazyDeopt);

  if (flags & CallDescriptor::kHasExceptionHandler) {
    InstructionOperandConverter i(this, instr);
    RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
    handlers_.push_back({GetLabel(handler_rpo), masm()->pc_offset()});
  }

  if (needs_frame_state) {
    MarkLazyDeoptSite();
    // If the frame state is present, it starts at argument 1 (just after the
    // code address).
    size_t frame_state_offset = 1;
    FrameStateDescriptor* descriptor =
        GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
    int pc_offset = masm()->pc_offset();
    int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
                                          descriptor->state_combine());
    // If the pre-call frame state differs from the post-call one, produce the
    // pre-call frame state, too.
    // TODO(jarin) We might want to avoid building the pre-call frame state
    // because it is only used to get locals and arguments (by the debugger and
    // f.arguments), and those are the same in the pre-call and post-call
    // states.
    if (!descriptor->state_combine().IsOutputIgnored()) {
      deopt_state_id = BuildTranslation(instr, -1, frame_state_offset,
                                        OutputFrameStateCombine::Ignore());
    }
#if DEBUG
    // Make sure all the values live in stack slots or they are immediates.
    // (The values should not live in registers because registers are
    // clobbered by calls.)
    for (size_t i = 0; i < descriptor->GetSize(); i++) {
      InstructionOperand* op = instr->InputAt(frame_state_offset + 1 + i);
      CHECK(op->IsStackSlot() || op->IsFPStackSlot() || op->IsImmediate());
    }
#endif
    safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
  }
}


int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = static_cast<int>(deoptimization_literals_.size());
  for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.push_back(literal);
  return result;
}

DeoptimizationEntry const& CodeGenerator::GetDeoptimizationEntry(
    Instruction* instr, size_t frame_state_offset) {
  InstructionOperandConverter i(this, instr);
  int const state_id = i.InputInt32(frame_state_offset);
  return code()->GetDeoptimizationEntry(state_id);
}

DeoptimizeReason CodeGenerator::GetDeoptimizationReason(
    int deoptimization_id) const {
  size_t const index = static_cast<size_t>(deoptimization_id);
  DCHECK_LT(index, deoptimization_states_.size());
  return deoptimization_states_[index]->reason();
}

void CodeGenerator::TranslateStateValueDescriptor(
    StateValueDescriptor* desc, Translation* translation,
    InstructionOperandIterator* iter) {
  if (desc->IsNested()) {
    translation->BeginCapturedObject(static_cast<int>(desc->size()));
    for (size_t index = 0; index < desc->fields().size(); index++) {
      TranslateStateValueDescriptor(&desc->fields()[index], translation, iter);
    }
  } else if (desc->IsDuplicate()) {
    translation->DuplicateObject(static_cast<int>(desc->id()));
  } else {
    DCHECK(desc->IsPlain());
    AddTranslationForOperand(translation, iter->instruction(), iter->Advance(),
                             desc->type());
  }
}


void CodeGenerator::TranslateFrameStateDescriptorOperands(
    FrameStateDescriptor* desc, InstructionOperandIterator* iter,
    OutputFrameStateCombine combine, Translation* translation) {
  for (size_t index = 0; index < desc->GetSize(combine); index++) {
    switch (combine.kind()) {
      case OutputFrameStateCombine::kPushOutput: {
        DCHECK(combine.GetPushCount() <= iter->instruction()->OutputCount());
        size_t size_without_output =
            desc->GetSize(OutputFrameStateCombine::Ignore());
        // If the index is past the existing stack items in values_.
        if (index >= size_without_output) {
          // Materialize the result of the call instruction in this slot.
          AddTranslationForOperand(
              translation, iter->instruction(),
              iter->instruction()->OutputAt(index - size_without_output),
              MachineType::AnyTagged());
          continue;
        }
        break;
      }
      case OutputFrameStateCombine::kPokeAt:
        // The result of the call should be placed at position
        // [index_from_top] in the stack (overwriting whatever was
        // previously there).
        size_t index_from_top =
            desc->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
        if (index >= index_from_top &&
            index < index_from_top + iter->instruction()->OutputCount()) {
          AddTranslationForOperand(
              translation, iter->instruction(),
              iter->instruction()->OutputAt(index - index_from_top),
              MachineType::AnyTagged());
          iter->Advance();  // We do not use this input, but we need to
                            // advance, as the input got replaced.
          continue;
        }
        break;
    }
    StateValueDescriptor* value_desc = desc->GetStateValueDescriptor();
    TranslateStateValueDescriptor(&value_desc->fields()[index], translation,
                                  iter);
  }
}


void CodeGenerator::BuildTranslationForFrameStateDescriptor(
    FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
    Translation* translation, OutputFrameStateCombine state_combine) {
  // Outer-most state must be added to translation first.
  if (descriptor->outer_state() != nullptr) {
    BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), iter,
                                            translation,
                                            OutputFrameStateCombine::Ignore());
  }

  Handle<SharedFunctionInfo> shared_info;
  if (!descriptor->shared_info().ToHandle(&shared_info)) {
    if (!info()->has_shared_info()) {
      return;  // Stub with no SharedFunctionInfo.
    }
    shared_info = info()->shared_info();
  }
  int shared_info_id = DefineDeoptimizationLiteral(shared_info);

  switch (descriptor->type()) {
    case FrameStateType::kJavaScriptFunction:
      translation->BeginJSFrame(
          descriptor->bailout_id(), shared_info_id,
          static_cast<unsigned int>(descriptor->GetSize(state_combine) -
                                    (1 + descriptor->parameters_count())));
      break;
    case FrameStateType::kInterpretedFunction:
      translation->BeginInterpretedFrame(
          descriptor->bailout_id(), shared_info_id,
          static_cast<unsigned int>(descriptor->locals_count() + 1));
      break;
    case FrameStateType::kArgumentsAdaptor:
      translation->BeginArgumentsAdaptorFrame(
          shared_info_id,
          static_cast<unsigned int>(descriptor->parameters_count()));
      break;
    case FrameStateType::kTailCallerFunction:
      translation->BeginTailCallerFrame(shared_info_id);
      break;
    case FrameStateType::kConstructStub:
      translation->BeginConstructStubFrame(
          shared_info_id,
          static_cast<unsigned int>(descriptor->parameters_count()));
      break;
    case FrameStateType::kGetterStub:
      translation->BeginGetterStubFrame(shared_info_id);
      break;
    case FrameStateType::kSetterStub:
      translation->BeginSetterStubFrame(shared_info_id);
      break;
  }

  TranslateFrameStateDescriptorOperands(descriptor, iter, state_combine,
                                        translation);
}


int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
                                    size_t frame_state_offset,
                                    OutputFrameStateCombine state_combine) {
  DeoptimizationEntry const& entry =
      GetDeoptimizationEntry(instr, frame_state_offset);
  FrameStateDescriptor* const descriptor = entry.descriptor();
  frame_state_offset++;

  Translation translation(
      &translations_, static_cast<int>(descriptor->GetFrameCount()),
      static_cast<int>(descriptor->GetJSFrameCount()), zone());
  InstructionOperandIterator iter(instr, frame_state_offset);
  BuildTranslationForFrameStateDescriptor(descriptor, &iter, &translation,
                                          state_combine);

  int deoptimization_id = static_cast<int>(deoptimization_states_.size());

  deoptimization_states_.push_back(new (zone()) DeoptimizationState(
      descriptor->bailout_id(), translation.index(), pc_offset,
      entry.reason()));

  return deoptimization_id;
}


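// Emits the translation entry for a single operand, dispatching on its
// location (stack slot, register, immediate) and machine type.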
void CodeGenerator::AddTranslationForOperand(Translation* translation,
                                             Instruction* instr,
                                             InstructionOperand* op,
                                             MachineType type) {
  if (op->IsStackSlot()) {
    if (type.representation() == MachineRepresentation::kBit) {
      translation->StoreBoolStackSlot(LocationOperand::cast(op)->index());
    } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
               type == MachineType::Int32()) {
      translation->StoreInt32StackSlot(LocationOperand::cast(op)->index());
    } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
               type == MachineType::Uint32()) {
      translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
    } else if (IsAnyTagged(type.representation())) {
      translation->StoreStackSlot(LocationOperand::cast(op)->index());
    } else {
      CHECK(false);
    }
  } else if (op->IsFPStackSlot()) {
    if (type.representation() == MachineRepresentation::kFloat64) {
      translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
    } else {
      DCHECK_EQ(MachineRepresentation::kFloat32, type.representation());
      translation->StoreFloatStackSlot(LocationOperand::cast(op)->index());
    }
  } else if (op->IsRegister()) {
    InstructionOperandConverter converter(this, instr);
    if (type.representation() == MachineRepresentation::kBit) {
      translation->StoreBoolRegister(converter.ToRegister(op));
    } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
               type == MachineType::Int32()) {
      translation->StoreInt32Register(converter.ToRegister(op));
    } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
               type == MachineType::Uint32()) {
      translation->StoreUint32Register(converter.ToRegister(op));
    } else if (IsAnyTagged(type.representation())) {
      translation->StoreRegister(converter.ToRegister(op));
    } else {
      CHECK(false);
    }
  } else if (op->IsFPRegister()) {
    InstructionOperandConverter converter(this, instr);
    if (type.representation() == MachineRepresentation::kFloat64) {
      translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
    } else {
      DCHECK_EQ(MachineRepresentation::kFloat32, type.representation());
      translation->StoreFloatRegister(converter.ToFloatRegister(op));
    }
  } else if (op->IsImmediate()) {
    InstructionOperandConverter converter(this, instr);
    Constant constant = converter.ToConstant(op);
    Handle<Object> constant_object;
    switch (constant.type()) {
      case Constant::kInt32:
        if (type.representation() == MachineRepresentation::kTagged ||
            type.representation() == MachineRepresentation::kTaggedSigned) {
          // When pointers are 4 bytes, we can use int32 constants to represent
          // Smis.
          DCHECK_EQ(4, kPointerSize);
          constant_object =
              handle(reinterpret_cast<Smi*>(constant.ToInt32()), isolate());
          DCHECK(constant_object->IsSmi());
        } else if (type.representation() == MachineRepresentation::kBit) {
          if (constant.ToInt32() == 0) {
            constant_object = isolate()->factory()->false_value();
          } else {
            DCHECK_EQ(1, constant.ToInt32());
            constant_object = isolate()->factory()->true_value();
          }
        } else {
          // TODO(jarin,bmeurer): We currently pass in raw pointers to the
          // JSFunction::entry here. We should really consider fixing this.
          DCHECK(type == MachineType::Int32() ||
                 type == MachineType::Uint32() ||
                 type.representation() == MachineRepresentation::kWord32 ||
                 type.representation() == MachineRepresentation::kNone);
          DCHECK(type.representation() != MachineRepresentation::kNone ||
                 constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);

          constant_object =
              isolate()->factory()->NewNumberFromInt(constant.ToInt32());
        }
        break;
      case Constant::kInt64:
        // When pointers are 8 bytes, we can use int64 constants to represent
        // Smis.
        // TODO(jarin,bmeurer): We currently pass in raw pointers to the
        // JSFunction::entry here. We should really consider fixing this.
        DCHECK(type.representation() == MachineRepresentation::kWord64 ||
               type.representation() == MachineRepresentation::kTagged ||
               type.representation() == MachineRepresentation::kTaggedSigned);
        DCHECK_EQ(8, kPointerSize);
        constant_object =
            handle(reinterpret_cast<Smi*>(constant.ToInt64()), isolate());
        DCHECK(constant_object->IsSmi());
        break;
      case Constant::kFloat32:
        if (type.representation() == MachineRepresentation::kTaggedSigned) {
          DCHECK(IsSmiDouble(constant.ToFloat32()));
        } else {
          DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
                 CanBeTaggedPointer(type.representation()));
        }
        constant_object = isolate()->factory()->NewNumber(constant.ToFloat32());
        break;
      case Constant::kFloat64:
        if (type.representation() == MachineRepresentation::kTaggedSigned) {
          DCHECK(IsSmiDouble(constant.ToFloat64()));
        } else {
          DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
                 CanBeTaggedPointer(type.representation()));
        }
        constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
        break;
      case Constant::kHeapObject:
        DCHECK(CanBeTaggedPointer(type.representation()));
        constant_object = constant.ToHeapObject();
        break;
      default:
        CHECK(false);
    }
    if (constant_object.is_identical_to(info()->closure())) {
      translation->StoreJSFrameFunction();
    } else {
      int literal_id = DefineDeoptimizationLiteral(constant_object);
      translation->StoreLiteral(literal_id);
    }
  } else {
    CHECK(false);
  }
}


void CodeGenerator::MarkLazyDeoptSite() {
  last_lazy_deopt_pc_ = masm()->pc_offset();
}

DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
    Instruction* instr, size_t frame_state_offset) {
  int const deoptimization_id = BuildTranslation(
      instr, -1, frame_state_offset, OutputFrameStateCombine::Ignore());
  DeoptimizationExit* const exit = new (zone())
      DeoptimizationExit(deoptimization_id, current_source_position_);
  deoptimization_exits_.push_back(exit);
  return exit;
}

OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
    : frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
  gen->ools_ = this;
}


OutOfLineCode::~OutOfLineCode() {}

}  // namespace compiler
}  // namespace internal
}  // namespace v8