1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/backend/code-generator.h"
6 
7 #include "src/base/iterator.h"
8 #include "src/codegen/assembler-inl.h"
9 #include "src/codegen/macro-assembler-inl.h"
10 #include "src/codegen/optimized-compilation-info.h"
11 #include "src/codegen/string-constants.h"
12 #include "src/compiler/backend/code-generator-impl.h"
13 #include "src/compiler/globals.h"
14 #include "src/compiler/linkage.h"
15 #include "src/compiler/pipeline.h"
16 #include "src/compiler/wasm-compiler.h"
17 #include "src/diagnostics/eh-frame.h"
18 #include "src/execution/frames.h"
19 #include "src/logging/counters.h"
20 #include "src/logging/log.h"
21 #include "src/objects/smi.h"
22 #include "src/utils/address-map.h"
23 
24 namespace v8 {
25 namespace internal {
26 namespace compiler {
27 
28 class CodeGenerator::JumpTable final : public ZoneObject {
29  public:
30   JumpTable(JumpTable* next, Label** targets, size_t target_count)
31       : next_(next), targets_(targets), target_count_(target_count) {}
32 
33   Label* label() { return &label_; }
34   JumpTable* next() const { return next_; }
35   Label** targets() const { return targets_; }
36   size_t target_count() const { return target_count_; }
37 
38  private:
39   Label label_;
40   JumpTable* const next_;
41   Label** const targets_;
42   size_t const target_count_;
43 };
44 
45 CodeGenerator::CodeGenerator(
46     Zone* codegen_zone, Frame* frame, Linkage* linkage,
47     InstructionSequence* instructions, OptimizedCompilationInfo* info,
48     Isolate* isolate, base::Optional<OsrHelper> osr_helper,
49     int start_source_position, JumpOptimizationInfo* jump_opt,
50     PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
51     int32_t builtin_index, size_t max_unoptimized_frame_height,
52     size_t max_pushed_argument_count, std::unique_ptr<AssemblerBuffer> buffer,
53     const char* debug_name)
54     : zone_(codegen_zone),
55       isolate_(isolate),
56       frame_access_state_(nullptr),
57       linkage_(linkage),
58       instructions_(instructions),
59       unwinding_info_writer_(codegen_zone),
60       info_(info),
61       labels_(
62           codegen_zone->NewArray<Label>(instructions->InstructionBlockCount())),
63       current_block_(RpoNumber::Invalid()),
64       start_source_position_(start_source_position),
65       current_source_position_(SourcePosition::Unknown()),
66       tasm_(isolate, options, CodeObjectRequired::kNo, std::move(buffer)),
67       resolver_(this),
68       safepoints_(codegen_zone),
69       handlers_(codegen_zone),
70       deoptimization_exits_(codegen_zone),
71       deoptimization_literals_(codegen_zone),
72       translations_(codegen_zone),
73       max_unoptimized_frame_height_(max_unoptimized_frame_height),
74       max_pushed_argument_count_(max_pushed_argument_count),
75       caller_registers_saved_(false),
76       jump_tables_(nullptr),
77       ools_(nullptr),
78       osr_helper_(std::move(osr_helper)),
79       osr_pc_offset_(-1),
80       optimized_out_literal_id_(-1),
81       source_position_table_builder_(
82           codegen_zone, SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
83       protected_instructions_(codegen_zone),
84       result_(kSuccess),
85       poisoning_level_(poisoning_level),
86       block_starts_(codegen_zone),
87       instr_starts_(codegen_zone),
88       debug_name_(debug_name) {
89   for (int i = 0; i < instructions->InstructionBlockCount(); ++i) {
90     new (&labels_[i]) Label;
91   }
92   CreateFrameAccessState(frame);
93   CHECK_EQ(info->is_osr(), osr_helper_.has_value());
94   tasm_.set_jump_optimization_info(jump_opt);
95   CodeKind code_kind = info->code_kind();
96   if (code_kind == CodeKind::WASM_FUNCTION ||
97       code_kind == CodeKind::WASM_TO_CAPI_FUNCTION ||
98       code_kind == CodeKind::WASM_TO_JS_FUNCTION ||
99       code_kind == CodeKind::JS_TO_WASM_FUNCTION) {
100     tasm_.set_abort_hard(true);
101   }
102   tasm_.set_builtin_index(builtin_index);
103 }
104 
105 bool CodeGenerator::wasm_runtime_exception_support() const {
106   DCHECK_NOT_NULL(info_);
107   return info_->wasm_runtime_exception_support();
108 }
109 
110 void CodeGenerator::AddProtectedInstructionLanding(uint32_t instr_offset,
111                                                    uint32_t landing_offset) {
112   protected_instructions_.push_back({instr_offset, landing_offset});
113 }
114 
115 void CodeGenerator::CreateFrameAccessState(Frame* frame) {
116   FinishFrame(frame);
117   frame_access_state_ = zone()->New<FrameAccessState>(frame);
118 }
119 
120 bool CodeGenerator::ShouldApplyOffsetToStackCheck(Instruction* instr,
121                                                   uint32_t* offset) {
122   DCHECK_EQ(instr->arch_opcode(), kArchStackPointerGreaterThan);
123 
124   StackCheckKind kind =
125       static_cast<StackCheckKind>(MiscField::decode(instr->opcode()));
126   if (kind != StackCheckKind::kJSFunctionEntry) return false;
127 
128   uint32_t stack_check_offset = *offset = GetStackCheckOffset();
129   return stack_check_offset > kStackLimitSlackForDeoptimizationInBytes;
130 }
131 
132 uint32_t CodeGenerator::GetStackCheckOffset() {
133   if (!frame_access_state()->has_frame()) {
134     DCHECK_EQ(max_unoptimized_frame_height_, 0);
135     DCHECK_EQ(max_pushed_argument_count_, 0);
136     return 0;
137   }
138 
139   int32_t optimized_frame_height =
140       frame()->GetTotalFrameSlotCount() * kSystemPointerSize;
141   DCHECK(is_int32(max_unoptimized_frame_height_));
142   int32_t signed_max_unoptimized_frame_height =
143       static_cast<int32_t>(max_unoptimized_frame_height_);
144 
145   // The offset is either the delta between the optimized frame and the
146   // interpreted frame, or the maximal number of bytes pushed to the stack
147   // while preparing for function calls, whichever is bigger.
148   uint32_t frame_height_delta = static_cast<uint32_t>(std::max(
149       signed_max_unoptimized_frame_height - optimized_frame_height, 0));
150   uint32_t max_pushed_argument_bytes =
151       static_cast<uint32_t>(max_pushed_argument_count_ * kSystemPointerSize);
152   return std::max(frame_height_delta, max_pushed_argument_bytes);
153 }
154 
155 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
156     DeoptimizationExit* exit) {
157   int deoptimization_id = exit->deoptimization_id();
158   if (deoptimization_id > Deoptimizer::kMaxNumberOfEntries) {
159     return kTooManyDeoptimizationBailouts;
160   }
161 
162   DeoptimizeKind deopt_kind = exit->kind();
163 
164   DeoptimizeReason deoptimization_reason = exit->reason();
165   Builtins::Name deopt_entry =
166       Deoptimizer::GetDeoptimizationEntry(tasm()->isolate(), deopt_kind);
167   Label* jump_deoptimization_entry_label =
168       &jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
169   if (info()->source_positions()) {
170     tasm()->RecordDeoptReason(deoptimization_reason, exit->pos(),
171                               deoptimization_id);
172   }
173 
174   if (deopt_kind == DeoptimizeKind::kLazy) {
175     tasm()->BindExceptionHandler(exit->label());
176   } else {
177     ++non_lazy_deopt_count_;
178     tasm()->bind(exit->label());
179   }
180 
181   tasm()->CallForDeoptimization(deopt_entry, deoptimization_id, exit->label(),
182                                 deopt_kind, jump_deoptimization_entry_label);
183   exit->set_emitted();
184   return kSuccess;
185 }
186 
187 void CodeGenerator::MaybeEmitOutOfLineConstantPool() {
188   tasm()->MaybeEmitOutOfLineConstantPool();
189 }
190 
191 void CodeGenerator::AssembleCode() {
192   OptimizedCompilationInfo* info = this->info();
193 
194   // Open a frame scope to indicate that there is a frame on the stack.  The
195   // MANUAL indicates that the scope shouldn't actually generate code to set up
196   // the frame (that is done in AssemblePrologue).
197   FrameScope frame_scope(tasm(), StackFrame::MANUAL);
198 
199   if (info->source_positions()) {
200     AssembleSourcePosition(start_source_position());
201   }
202   offsets_info_.code_start_register_check = tasm()->pc_offset();
203 
204   tasm()->CodeEntry();
205 
206   // Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
207   if (FLAG_debug_code && info->called_with_code_start_register()) {
208     tasm()->RecordComment("-- Prologue: check code start register --");
209     AssembleCodeStartRegisterCheck();
210   }
211 
212   offsets_info_.deopt_check = tasm()->pc_offset();
213   // We want to bail out only from JS functions, which are the only ones
214   // that are optimized.
215   if (info->IsOptimizing()) {
216     DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
217     tasm()->RecordComment("-- Prologue: check for deoptimization --");
218     BailoutIfDeoptimized();
219   }
220 
221   offsets_info_.init_poison = tasm()->pc_offset();
222   InitializeSpeculationPoison();
223 
224   // Define deoptimization literals for all inlined functions.
225   DCHECK_EQ(0u, deoptimization_literals_.size());
226   for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
227        info->inlined_functions()) {
228     if (!inlined.shared_info.equals(info->shared_info())) {
229       int index = DefineDeoptimizationLiteral(
230           DeoptimizationLiteral(inlined.shared_info));
231       inlined.RegisterInlinedFunctionId(index);
232     }
233   }
234   inlined_function_count_ = deoptimization_literals_.size();
235 
236   // Define deoptimization literals for all BytecodeArrays to which we might
237   // deopt to ensure they are strongly held by the optimized code.
238   if (info->has_bytecode_array()) {
239     DefineDeoptimizationLiteral(DeoptimizationLiteral(info->bytecode_array()));
240   }
241   for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
242        info->inlined_functions()) {
243     DefineDeoptimizationLiteral(DeoptimizationLiteral(inlined.bytecode_array));
244   }
245 
246   unwinding_info_writer_.SetNumberOfInstructionBlocks(
247       instructions()->InstructionBlockCount());
248 
249   if (info->trace_turbo_json()) {
250     block_starts_.assign(instructions()->instruction_blocks().size(), -1);
251     instr_starts_.assign(instructions()->instructions().size(), {});
252   }
253   // Assemble instructions in assembly order.
254   offsets_info_.blocks_start = tasm()->pc_offset();
255   for (const InstructionBlock* block : instructions()->ao_blocks()) {
256     // Align loop headers on vendor recommended boundaries.
257     if (block->ShouldAlign() && !tasm()->jump_optimization_info()) {
258       tasm()->CodeTargetAlign();
259     }
260     if (info->trace_turbo_json()) {
261       block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset();
262     }
263     // Bind a label for a block.
264     current_block_ = block->rpo_number();
265     unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block);
266     if (FLAG_code_comments) {
267       std::ostringstream buffer;
268       buffer << "-- B" << block->rpo_number().ToInt() << " start";
269       if (block->IsDeferred()) buffer << " (deferred)";
270       if (!block->needs_frame()) buffer << " (no frame)";
271       if (block->must_construct_frame()) buffer << " (construct frame)";
272       if (block->must_deconstruct_frame()) buffer << " (deconstruct frame)";
273 
274       if (block->IsLoopHeader()) {
275         buffer << " (loop up to " << block->loop_end().ToInt() << ")";
276       }
277       if (block->loop_header().IsValid()) {
278         buffer << " (in loop " << block->loop_header().ToInt() << ")";
279       }
280       buffer << " --";
281       tasm()->RecordComment(buffer.str().c_str());
282     }
283 
284     frame_access_state()->MarkHasFrame(block->needs_frame());
285 
286     tasm()->bind(GetLabel(current_block_));
287 
288     TryInsertBranchPoisoning(block);
289 
290     if (block->must_construct_frame()) {
291       AssembleConstructFrame();
292       // We need to set up the root register after we assemble the prologue, to
293       // avoid clobbering callee saved registers in case of C linkage and
294       // using the roots.
295       // TODO(mtrofin): investigate how we can avoid doing this repeatedly.
296       if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
297         tasm()->InitializeRootRegister();
298       }
299     }
300 
301     if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
302       ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
303       result_ = AssembleBlock(block);
304     } else {
305       result_ = AssembleBlock(block);
306     }
307     if (result_ != kSuccess) return;
308     unwinding_info_writer_.EndInstructionBlock(block);
309   }
310 
311   // Assemble all out-of-line code.
312   offsets_info_.out_of_line_code = tasm()->pc_offset();
313   if (ools_) {
314     tasm()->RecordComment("-- Out of line code --");
315     for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
316       tasm()->bind(ool->entry());
317       ool->Generate();
318       if (ool->exit()->is_bound()) tasm()->jmp(ool->exit());
319     }
320   }
321 
322   // This nop operation is needed to ensure that the trampoline is not
323   // confused with the pc of the call before deoptimization.
324   // The test regress/regress-259 is an example of where we need it.
325   tasm()->nop();
326 
327   // For some targets, we must make sure that constant and veneer pools are
328   // emitted before emitting the deoptimization exits.
329   PrepareForDeoptimizationExits(&deoptimization_exits_);
330 
331   if (Deoptimizer::kSupportsFixedDeoptExitSizes) {
332     deopt_exit_start_offset_ = tasm()->pc_offset();
333   }
334 
335   // Assemble deoptimization exits.
336   offsets_info_.deoptimization_exits = tasm()->pc_offset();
337   int last_updated = 0;
338   // We sort the deoptimization exits here so that the lazy ones will
339   // be visited last. We need this as on architectures where
340   // Deoptimizer::kSupportsFixedDeoptExitSizes is true, lazy deopts
341   // might need additional instructions.
342   auto cmp = [](const DeoptimizationExit* a, const DeoptimizationExit* b) {
343     static_assert(DeoptimizeKind::kLazy == kLastDeoptimizeKind,
344                   "lazy deopts are expected to be emitted last");
345     if (a->kind() != b->kind()) {
346       return a->kind() < b->kind();
347     }
348     return a->pc_offset() < b->pc_offset();
349   };
350   if (Deoptimizer::kSupportsFixedDeoptExitSizes) {
351     std::sort(deoptimization_exits_.begin(), deoptimization_exits_.end(), cmp);
352   }
353 
354   for (DeoptimizationExit* exit : deoptimization_exits_) {
355     if (exit->emitted()) continue;
356     if (Deoptimizer::kSupportsFixedDeoptExitSizes) {
357       exit->set_deoptimization_id(next_deoptimization_id_++);
358     }
359     result_ = AssembleDeoptimizerCall(exit);
360     if (result_ != kSuccess) return;
361 
362     // UpdateDeoptimizationInfo expects lazy deopts to be visited in pc_offset
363     // order, which is always the case since they are added to
364     // deoptimization_exits_ in that order, and the optional sort operation
365     // above preserves that order.
366     if (exit->kind() == DeoptimizeKind::kLazy) {
367       int trampoline_pc = exit->label()->pos();
368       last_updated = safepoints()->UpdateDeoptimizationInfo(
369           exit->pc_offset(), trampoline_pc, last_updated,
370           exit->deoptimization_id());
371     }
372   }
373 
374   offsets_info_.pools = tasm()->pc_offset();
375   // TODO(jgruber): Move all inlined metadata generation into a new,
376   // architecture-independent version of FinishCode. Currently, this includes
377   // the safepoint table, handler table, constant pool, and code comments, in
378   // that order.
379   FinishCode();
380 
381   offsets_info_.jump_tables = tasm()->pc_offset();
382   // Emit the jump tables.
383   if (jump_tables_) {
384     tasm()->Align(kSystemPointerSize);
385     for (JumpTable* table = jump_tables_; table; table = table->next()) {
386       tasm()->bind(table->label());
387       AssembleJumpTable(table->targets(), table->target_count());
388     }
389   }
390 
391   // The PerfJitLogger logs code up until here, excluding the safepoint
392   // table. Resolve the unwinding info now so it is aware of the same code
393   // size as reported by perf.
394   unwinding_info_writer_.Finish(tasm()->pc_offset());
395 
396   // Final alignment before starting on the metadata section.
397   tasm()->Align(Code::kMetadataAlignment);
398 
399   safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
400 
401   // Emit the exception handler table.
402   if (!handlers_.empty()) {
403     handler_table_offset_ = HandlerTable::EmitReturnTableStart(tasm());
404     for (size_t i = 0; i < handlers_.size(); ++i) {
405       HandlerTable::EmitReturnEntry(tasm(), handlers_[i].pc_offset,
406                                     handlers_[i].handler->pos());
407     }
408   }
409 
410   tasm()->MaybeEmitOutOfLineConstantPool();
411   tasm()->FinalizeJumpOptimizationInfo();
412 
413   result_ = kSuccess;
414 }
415 
416 void CodeGenerator::TryInsertBranchPoisoning(const InstructionBlock* block) {
417   // See if our predecessor was a basic block terminated by a branch_and_poison
418   // instruction. If yes, then perform the masking based on the flags.
419   if (block->PredecessorCount() != 1) return;
420   RpoNumber pred_rpo = (block->predecessors())[0];
421   const InstructionBlock* pred = instructions()->InstructionBlockAt(pred_rpo);
422   if (pred->code_start() == pred->code_end()) return;
423   Instruction* instr = instructions()->InstructionAt(pred->code_end() - 1);
424   FlagsMode mode = FlagsModeField::decode(instr->opcode());
425   switch (mode) {
426     case kFlags_branch_and_poison: {
427       BranchInfo branch;
428       RpoNumber target = ComputeBranchInfo(&branch, instr);
429       if (!target.IsValid()) {
430         // Non-trivial branch, add the masking code.
431         FlagsCondition condition = branch.condition;
432         if (branch.false_label == GetLabel(block->rpo_number())) {
433           condition = NegateFlagsCondition(condition);
434         }
435         AssembleBranchPoisoning(condition, instr);
436       }
437       break;
438     }
439     case kFlags_deoptimize_and_poison: {
440       UNREACHABLE();
441     }
442     default:
443       break;
444   }
445 }
446 
447 void CodeGenerator::AssembleArchBinarySearchSwitchRange(
448     Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
449     std::pair<int32_t, Label*>* end) {
450   if (end - begin < kBinarySearchSwitchMinimalCases) {
451     while (begin != end) {
452       tasm()->JumpIfEqual(input, begin->first, begin->second);
453       ++begin;
454     }
455     AssembleArchJump(def_block);
456     return;
457   }
458   auto middle = begin + (end - begin) / 2;
459   Label less_label;
460   tasm()->JumpIfLessThan(input, middle->first, &less_label);
461   AssembleArchBinarySearchSwitchRange(input, def_block, middle, end);
462   tasm()->bind(&less_label);
463   AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle);
464 }
465 
466 OwnedVector<byte> CodeGenerator::GetSourcePositionTable() {
467   return source_position_table_builder_.ToSourcePositionTableVector();
468 }
469 
470 OwnedVector<byte> CodeGenerator::GetProtectedInstructionsData() {
471   return OwnedVector<byte>::Of(
472       Vector<byte>::cast(VectorOf(protected_instructions_)));
473 }
474 
475 MaybeHandle<Code> CodeGenerator::FinalizeCode() {
476   if (result_ != kSuccess) {
477     tasm()->AbortedCodeGeneration();
478     return MaybeHandle<Code>();
479   }
480 
481   // Allocate the source position table.
482   Handle<ByteArray> source_positions =
483       source_position_table_builder_.ToSourcePositionTable(isolate());
484 
485   // Allocate deoptimization data.
486   Handle<DeoptimizationData> deopt_data = GenerateDeoptimizationData();
487 
488   // Allocate and install the code.
489   CodeDesc desc;
490   tasm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_);
491 
492 #if defined(V8_OS_WIN64)
493   if (Builtins::IsBuiltinId(info_->builtin_index())) {
494     isolate_->SetBuiltinUnwindData(info_->builtin_index(),
495                                    tasm()->GetUnwindInfo());
496   }
497 #endif  // V8_OS_WIN64
498 
499   if (unwinding_info_writer_.eh_frame_writer()) {
500     unwinding_info_writer_.eh_frame_writer()->GetEhFrame(&desc);
501   }
502 
503   MaybeHandle<Code> maybe_code =
504       Factory::CodeBuilder(isolate(), desc, info()->code_kind())
505           .set_builtin_index(info()->builtin_index())
506           .set_inlined_bytecode_size(info()->inlined_bytecode_size())
507           .set_source_position_table(source_positions)
508           .set_deoptimization_data(deopt_data)
509           .set_is_turbofanned()
510           .set_stack_slots(frame()->GetTotalFrameSlotCount())
511           .set_profiler_data(info()->profiler_data())
512           .TryBuild();
513 
514   Handle<Code> code;
515   if (!maybe_code.ToHandle(&code)) {
516     tasm()->AbortedCodeGeneration();
517     return MaybeHandle<Code>();
518   }
519 
520   // TODO(jgruber,v8:8888): Turn this into a DCHECK once confidence is
521   // high that the implementation is complete.
522   CHECK_IMPLIES(info()->IsNativeContextIndependent(),
523                 code->IsNativeContextIndependent(isolate()));
524 
525   // Counts both compiled code and metadata.
526   isolate()->counters()->total_compiled_code_size()->Increment(
527       code->raw_body_size());
528 
529   LOG_CODE_EVENT(isolate(),
530                  CodeLinePosInfoRecordEvent(code->raw_instruction_start(),
531                                             *source_positions));
532 
533   return code;
534 }
535 
536 bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
537   return instructions()
538       ->InstructionBlockAt(current_block_)
539       ->ao_number()
540       .IsNext(instructions()->InstructionBlockAt(block)->ao_number());
541 }
542 
543 void CodeGenerator::RecordSafepoint(ReferenceMap* references,
544                                     Safepoint::DeoptMode deopt_mode) {
545   Safepoint safepoint = safepoints()->DefineSafepoint(tasm(), deopt_mode);
546   int stackSlotToSpillSlotDelta =
547       frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
548   for (const InstructionOperand& operand : references->reference_operands()) {
549     if (operand.IsStackSlot()) {
550       int index = LocationOperand::cast(operand).index();
551       DCHECK_LE(0, index);
552       // We might index values in the fixed part of the frame (i.e. the
553       // closure pointer or the context pointer); these are not spill slots
554       // and therefore don't work with the SafepointTable currently, but
555       // we also don't need to worry about them, since the GC has special
556       // knowledge about those fields anyway.
557       if (index < stackSlotToSpillSlotDelta) continue;
558       safepoint.DefinePointerSlot(index);
559     }
560   }
561 }
562 
563 bool CodeGenerator::IsMaterializableFromRoot(Handle<HeapObject> object,
564                                              RootIndex* index_return) {
565   const CallDescriptor* incoming_descriptor =
566       linkage()->GetIncomingDescriptor();
567   if (incoming_descriptor->flags() & CallDescriptor::kCanUseRoots) {
568     return isolate()->roots_table().IsRootHandle(object, index_return) &&
569            RootsTable::IsImmortalImmovable(*index_return);
570   }
571   return false;
572 }
573 
574 CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
575     const InstructionBlock* block) {
576   if (block->IsHandler()) {
577     tasm()->ExceptionHandler();
578   }
579   for (int i = block->code_start(); i < block->code_end(); ++i) {
580     CodeGenResult result = AssembleInstruction(i, block);
581     if (result != kSuccess) return result;
582   }
583   return kSuccess;
584 }
585 
586 bool CodeGenerator::IsValidPush(InstructionOperand source,
587                                 CodeGenerator::PushTypeFlags push_type) {
588   if (source.IsImmediate() &&
589       ((push_type & CodeGenerator::kImmediatePush) != 0)) {
590     return true;
591   }
592   if (source.IsRegister() &&
593       ((push_type & CodeGenerator::kRegisterPush) != 0)) {
594     return true;
595   }
596   if (source.IsStackSlot() &&
597       ((push_type & CodeGenerator::kStackSlotPush) != 0)) {
598     return true;
599   }
600   return false;
601 }
602 
603 void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
604                                            PushTypeFlags push_type,
605                                            ZoneVector<MoveOperands*>* pushes) {
606   static constexpr int first_push_compatible_index =
607       kReturnAddressStackSlotCount;
608   pushes->clear();
609   for (int i = Instruction::FIRST_GAP_POSITION;
610        i <= Instruction::LAST_GAP_POSITION; ++i) {
611     Instruction::GapPosition inner_pos =
612         static_cast<Instruction::GapPosition>(i);
613     ParallelMove* parallel_move = instr->GetParallelMove(inner_pos);
614     if (parallel_move != nullptr) {
615       for (auto move : *parallel_move) {
616         InstructionOperand source = move->source();
617         InstructionOperand destination = move->destination();
618         // If there are any moves from slots that will be overridden by pushes,
619         // then the full gap resolver must be used since the optimization with
620         // pushes doesn't participate in the parallel move and might clobber
621         // values needed for the gap resolve.
622         if (source.IsAnyStackSlot() && LocationOperand::cast(source).index() >=
623                                            first_push_compatible_index) {
624           pushes->clear();
625           return;
626         }
627         // TODO(danno): Right now, only consider moves from the FIRST gap for
628         // pushes. Theoretically, we could extract pushes for both gaps (there
629         // are cases where this happens), but the logic for that would also have
630         // to check to make sure that non-memory inputs to the pushes from the
631         // LAST gap don't get clobbered in the FIRST gap.
632         if (i == Instruction::FIRST_GAP_POSITION) {
633           if (destination.IsStackSlot() &&
634               LocationOperand::cast(destination).index() >=
635                   first_push_compatible_index) {
636             int index = LocationOperand::cast(destination).index();
637             if (IsValidPush(source, push_type)) {
638               if (index >= static_cast<int>(pushes->size())) {
639                 pushes->resize(index + 1);
640               }
641               (*pushes)[index] = move;
642             }
643           }
644         }
645       }
646     }
647   }
648 
649   // For now, only support a contiguous run of pushes at the end of the list.
650   size_t push_count_upper_bound = pushes->size();
651   size_t push_begin = push_count_upper_bound;
652   for (auto move : base::Reversed(*pushes)) {
653     if (move == nullptr) break;
654     push_begin--;
655   }
656   size_t push_count = pushes->size() - push_begin;
657   std::copy(pushes->begin() + push_begin,
658             pushes->begin() + push_begin + push_count, pushes->begin());
659   pushes->resize(push_count);
660 }
661 
662 CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferMove(
663     InstructionOperand* source, InstructionOperand* destination) {
664   if (source->IsConstant()) {
665     if (destination->IsAnyRegister()) {
666       return MoveType::kConstantToRegister;
667     } else {
668       DCHECK(destination->IsAnyStackSlot());
669       return MoveType::kConstantToStack;
670     }
671   }
672   DCHECK(LocationOperand::cast(source)->IsCompatible(
673       LocationOperand::cast(destination)));
674   if (source->IsAnyRegister()) {
675     if (destination->IsAnyRegister()) {
676       return MoveType::kRegisterToRegister;
677     } else {
678       DCHECK(destination->IsAnyStackSlot());
679       return MoveType::kRegisterToStack;
680     }
681   } else {
682     DCHECK(source->IsAnyStackSlot());
683     if (destination->IsAnyRegister()) {
684       return MoveType::kStackToRegister;
685     } else {
686       DCHECK(destination->IsAnyStackSlot());
687       return MoveType::kStackToStack;
688     }
689   }
690 }
691 
692 CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferSwap(
693     InstructionOperand* source, InstructionOperand* destination) {
694   DCHECK(LocationOperand::cast(source)->IsCompatible(
695       LocationOperand::cast(destination)));
696   if (source->IsAnyRegister()) {
697     if (destination->IsAnyRegister()) {
698       return MoveType::kRegisterToRegister;
699     } else {
700       DCHECK(destination->IsAnyStackSlot());
701       return MoveType::kRegisterToStack;
702     }
703   } else {
704     DCHECK(source->IsAnyStackSlot());
705     DCHECK(destination->IsAnyStackSlot());
706     return MoveType::kStackToStack;
707   }
708 }
709 
710 RpoNumber CodeGenerator::ComputeBranchInfo(BranchInfo* branch,
711                                            Instruction* instr) {
712   // Assemble a branch after this instruction.
713   InstructionOperandConverter i(this, instr);
714   RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
715   RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
716 
717   if (true_rpo == false_rpo) {
718     return true_rpo;
719   }
720   FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
721   if (IsNextInAssemblyOrder(true_rpo)) {
722     // true block is next, can fall through if condition negated.
723     std::swap(true_rpo, false_rpo);
724     condition = NegateFlagsCondition(condition);
725   }
726   branch->condition = condition;
727   branch->true_label = GetLabel(true_rpo);
728   branch->false_label = GetLabel(false_rpo);
729   branch->fallthru = IsNextInAssemblyOrder(false_rpo);
730   return RpoNumber::Invalid();
731 }
732 
733 CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
734     int instruction_index, const InstructionBlock* block) {
735   Instruction* instr = instructions()->InstructionAt(instruction_index);
736   if (info()->trace_turbo_json()) {
737     instr_starts_[instruction_index].gap_pc_offset = tasm()->pc_offset();
738   }
739   int first_unused_stack_slot;
740   FlagsMode mode = FlagsModeField::decode(instr->opcode());
741   if (mode != kFlags_trap) {
742     AssembleSourcePosition(instr);
743   }
744   bool adjust_stack =
745       GetSlotAboveSPBeforeTailCall(instr, &first_unused_stack_slot);
746   if (adjust_stack) AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
747   AssembleGaps(instr);
748   if (adjust_stack) AssembleTailCallAfterGap(instr, first_unused_stack_slot);
749   DCHECK_IMPLIES(
750       block->must_deconstruct_frame(),
751       instr != instructions()->InstructionAt(block->last_instruction_index()) ||
752           instr->IsRet() || instr->IsJump());
753   if (instr->IsJump() && block->must_deconstruct_frame()) {
754     AssembleDeconstructFrame();
755   }
756   if (info()->trace_turbo_json()) {
757     instr_starts_[instruction_index].arch_instr_pc_offset = tasm()->pc_offset();
758   }
759   // Assemble architecture-specific code for the instruction.
760   CodeGenResult result = AssembleArchInstruction(instr);
761   if (result != kSuccess) return result;
762 
763   if (info()->trace_turbo_json()) {
764     instr_starts_[instruction_index].condition_pc_offset = tasm()->pc_offset();
765   }
766 
767   FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
768   switch (mode) {
769     case kFlags_branch:
770     case kFlags_branch_and_poison: {
771       BranchInfo branch;
772       RpoNumber target = ComputeBranchInfo(&branch, instr);
773       if (target.IsValid()) {
774         // redundant branch.
775         if (!IsNextInAssemblyOrder(target)) {
776           AssembleArchJump(target);
777         }
778         return kSuccess;
779       }
780       // Assemble architecture-specific branch.
781       AssembleArchBranch(instr, &branch);
782       break;
783     }
784     case kFlags_deoptimize:
785     case kFlags_deoptimize_and_poison: {
786       // Assemble a conditional eager deoptimization after this instruction.
787       InstructionOperandConverter i(this, instr);
788       size_t frame_state_offset = MiscField::decode(instr->opcode());
789       DeoptimizationExit* const exit =
790           AddDeoptimizationExit(instr, frame_state_offset);
791       Label continue_label;
792       BranchInfo branch;
793       branch.condition = condition;
794       branch.true_label = exit->label();
795       branch.false_label = &continue_label;
796       branch.fallthru = true;
797       // Assemble architecture-specific branch.
798       AssembleArchDeoptBranch(instr, &branch);
799       tasm()->bind(&continue_label);
800       if (mode == kFlags_deoptimize_and_poison) {
801         AssembleBranchPoisoning(NegateFlagsCondition(branch.condition), instr);
802       }
803       break;
804     }
805     case kFlags_set: {
806       // Assemble a boolean materialization after this instruction.
807       AssembleArchBoolean(instr, condition);
808       break;
809     }
810     case kFlags_trap: {
811       AssembleArchTrap(instr, condition);
812       break;
813     }
814     case kFlags_none: {
815       break;
816     }
817   }
818 
819   // TODO(jarin) We should thread the flag through rather than set it.
820   if (instr->IsCall()) {
821     ResetSpeculationPoison();
822   }
823 
824   return kSuccess;
825 }
826 
827 void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
828   SourcePosition source_position = SourcePosition::Unknown();
829   if (instr->IsNop() && instr->AreMovesRedundant()) return;
830   if (!instructions()->GetSourcePosition(instr, &source_position)) return;
831   AssembleSourcePosition(source_position);
832 }
833 
834 void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
835   if (source_position == current_source_position_) return;
836   current_source_position_ = source_position;
837   if (!source_position.IsKnown()) return;
838   source_position_table_builder_.AddPosition(tasm()->pc_offset(),
839                                              source_position, false);
840   if (FLAG_code_comments) {
841     OptimizedCompilationInfo* info = this->info();
842     if (!info->IsOptimizing() && !info->IsWasm()) return;
843     std::ostringstream buffer;
844     buffer << "-- ";
845     // Turbolizer only needs the source position, as it can reconstruct
846     // the inlining stack from other information.
847     if (info->trace_turbo_json() || !tasm()->isolate() ||
848         tasm()->isolate()->concurrent_recompilation_enabled()) {
849       buffer << source_position;
850     } else {
851       AllowHeapAllocation allocation;
852       AllowHandleAllocation handles;
853       AllowHandleDereference deref;
854       buffer << source_position.InliningStack(info);
855     }
856     buffer << " --";
857     tasm()->RecordComment(buffer.str().c_str());
858   }
859 }
860 
861 bool CodeGenerator::GetSlotAboveSPBeforeTailCall(Instruction* instr,
862                                                  int* slot) {
863   if (instr->IsTailCall()) {
864     InstructionOperandConverter g(this, instr);
865     *slot = g.InputInt32(instr->InputCount() - 1);
866     return true;
867   } else {
868     return false;
869   }
870 }
871 
872 StubCallMode CodeGenerator::DetermineStubCallMode() const {
873   CodeKind code_kind = info()->code_kind();
874   return (code_kind == CodeKind::WASM_FUNCTION ||
875           code_kind == CodeKind::WASM_TO_CAPI_FUNCTION ||
876           code_kind == CodeKind::WASM_TO_JS_FUNCTION)
877              ? StubCallMode::kCallWasmRuntimeStub
878              : StubCallMode::kCallCodeObject;
879 }
880 
881 void CodeGenerator::AssembleGaps(Instruction* instr) {
882   for (int i = Instruction::FIRST_GAP_POSITION;
883        i <= Instruction::LAST_GAP_POSITION; i++) {
884     Instruction::GapPosition inner_pos =
885         static_cast<Instruction::GapPosition>(i);
886     ParallelMove* move = instr->GetParallelMove(inner_pos);
887     if (move != nullptr) resolver()->Resolve(move);
888   }
889 }
890 
891 namespace {
892 
893 Handle<PodArray<InliningPosition>> CreateInliningPositions(
894     OptimizedCompilationInfo* info, Isolate* isolate) {
895   const OptimizedCompilationInfo::InlinedFunctionList& inlined_functions =
896       info->inlined_functions();
897   if (inlined_functions.size() == 0) {
898     return Handle<PodArray<InliningPosition>>::cast(
899         isolate->factory()->empty_byte_array());
900   }
901   Handle<PodArray<InliningPosition>> inl_positions =
902       PodArray<InliningPosition>::New(
903           isolate, static_cast<int>(inlined_functions.size()),
904           AllocationType::kOld);
905   for (size_t i = 0; i < inlined_functions.size(); ++i) {
906     inl_positions->set(static_cast<int>(i), inlined_functions[i].position);
907   }
908   return inl_positions;
909 }
910 
911 }  // namespace
912 
913 Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
914   OptimizedCompilationInfo* info = this->info();
915   int deopt_count = static_cast<int>(deoptimization_exits_.size());
916   if (deopt_count == 0 && !info->is_osr()) {
917     return DeoptimizationData::Empty(isolate());
918   }
919   Handle<DeoptimizationData> data =
920       DeoptimizationData::New(isolate(), deopt_count, AllocationType::kOld);
921 
922   Handle<ByteArray> translation_array =
923       translations_.CreateByteArray(isolate()->factory());
924 
925   data->SetTranslationByteArray(*translation_array);
926   data->SetInlinedFunctionCount(
927       Smi::FromInt(static_cast<int>(inlined_function_count_)));
928   data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
929 
930   data->SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_));
931   data->SetNonLazyDeoptCount(Smi::FromInt(non_lazy_deopt_count_));
932 
933   if (info->has_shared_info()) {
934     data->SetSharedFunctionInfo(*info->shared_info());
935   } else {
936     data->SetSharedFunctionInfo(Smi::zero());
937   }
938 
939   Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
940       static_cast<int>(deoptimization_literals_.size()), AllocationType::kOld);
941   for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
942     Handle<Object> object = deoptimization_literals_[i].Reify(isolate());
943     CHECK(!object.is_null());
944     literals->set(i, *object);
945   }
946   data->SetLiteralArray(*literals);
947 
948   Handle<PodArray<InliningPosition>> inl_pos =
949       CreateInliningPositions(info, isolate());
950   data->SetInliningPositions(*inl_pos);
951 
952   if (info->is_osr()) {
953     DCHECK_LE(0, osr_pc_offset_);
954     data->SetOsrBytecodeOffset(Smi::FromInt(info_->osr_offset().ToInt()));
955     data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
956   } else {
957     BailoutId osr_offset = BailoutId::None();
958     data->SetOsrBytecodeOffset(Smi::FromInt(osr_offset.ToInt()));
959     data->SetOsrPcOffset(Smi::FromInt(-1));
960   }
961 
962   // Populate deoptimization entries.
963   for (int i = 0; i < deopt_count; i++) {
964     DeoptimizationExit* deoptimization_exit = deoptimization_exits_[i];
965     CHECK_NOT_NULL(deoptimization_exit);
966     DCHECK_EQ(i, deoptimization_exit->deoptimization_id());
967     data->SetBytecodeOffset(i, deoptimization_exit->bailout_id());
968     data->SetTranslationIndex(
969         i, Smi::FromInt(deoptimization_exit->translation_id()));
970     data->SetPc(i, Smi::FromInt(deoptimization_exit->pc_offset()));
971   }
972 
973   return data;
974 }
975 
976 Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
977   jump_tables_ = zone()->New<JumpTable>(jump_tables_, targets, target_count);
978   return jump_tables_->label();
979 }
980 
981 void CodeGenerator::RecordCallPosition(Instruction* instr) {
982   const bool needs_frame_state =
983       instr->HasCallDescriptorFlag(CallDescriptor::kNeedsFrameState);
984   RecordSafepoint(instr->reference_map(), needs_frame_state
985                                               ? Safepoint::kLazyDeopt
986                                               : Safepoint::kNoLazyDeopt);
987 
988   if (instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler)) {
989     InstructionOperandConverter i(this, instr);
990     RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
991     DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
992     handlers_.push_back(
993         {GetLabel(handler_rpo), tasm()->pc_offset_for_safepoint()});
994   }
995 
996   if (needs_frame_state) {
997     MarkLazyDeoptSite();
998     // If the frame state is present, it starts at argument 2 - after
999     // the code address and the poison-alias index.
1000     size_t frame_state_offset = 2;
1001     FrameStateDescriptor* descriptor =
1002         GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
1003     int pc_offset = tasm()->pc_offset_for_safepoint();
1004     BuildTranslation(instr, pc_offset, frame_state_offset,
1005                      descriptor->state_combine());
1006   }
1007 }
1008 
1009 int CodeGenerator::DefineDeoptimizationLiteral(DeoptimizationLiteral literal) {
1010   literal.Validate();
1011   int result = static_cast<int>(deoptimization_literals_.size());
1012   for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
1013     deoptimization_literals_[i].Validate();
1014     if (deoptimization_literals_[i] == literal) return i;
1015   }
1016   deoptimization_literals_.push_back(literal);
1017   return result;
1018 }
1019 
1020 DeoptimizationEntry const& CodeGenerator::GetDeoptimizationEntry(
1021     Instruction* instr, size_t frame_state_offset) {
1022   InstructionOperandConverter i(this, instr);
1023   int const state_id = i.InputInt32(frame_state_offset);
1024   return instructions()->GetDeoptimizationEntry(state_id);
1025 }
1026 
1027 void CodeGenerator::TranslateStateValueDescriptor(
1028     StateValueDescriptor* desc, StateValueList* nested,
1029     Translation* translation, InstructionOperandIterator* iter) {
1030   // Note:
1031   // If translation is null, we just skip the relevant instruction operands.
1032   if (desc->IsNested()) {
1033     if (translation != nullptr) {
1034       translation->BeginCapturedObject(static_cast<int>(nested->size()));
1035     }
1036     for (auto field : *nested) {
1037       TranslateStateValueDescriptor(field.desc, field.nested, translation,
1038                                     iter);
1039     }
1040   } else if (desc->IsArgumentsElements()) {
1041     if (translation != nullptr) {
1042       translation->ArgumentsElements(desc->arguments_type());
1043     }
1044   } else if (desc->IsArgumentsLength()) {
1045     if (translation != nullptr) {
1046       translation->ArgumentsLength();
1047     }
1048   } else if (desc->IsDuplicate()) {
1049     if (translation != nullptr) {
1050       translation->DuplicateObject(static_cast<int>(desc->id()));
1051     }
1052   } else if (desc->IsPlain()) {
1053     InstructionOperand* op = iter->Advance();
1054     if (translation != nullptr) {
1055       AddTranslationForOperand(translation, iter->instruction(), op,
1056                                desc->type());
1057     }
1058   } else {
1059     DCHECK(desc->IsOptimizedOut());
1060     if (translation != nullptr) {
1061       if (optimized_out_literal_id_ == -1) {
1062         optimized_out_literal_id_ = DefineDeoptimizationLiteral(
1063             DeoptimizationLiteral(isolate()->factory()->optimized_out()));
1064       }
1065       translation->StoreLiteral(optimized_out_literal_id_);
1066     }
1067   }
1068 }
1069 
1070 void CodeGenerator::TranslateFrameStateDescriptorOperands(
1071     FrameStateDescriptor* desc, InstructionOperandIterator* iter,
1072     Translation* translation) {
1073   size_t index = 0;
1074   StateValueList* values = desc->GetStateValueDescriptors();
1075   for (StateValueList::iterator it = values->begin(); it != values->end();
1076        ++it, ++index) {
1077     TranslateStateValueDescriptor((*it).desc, (*it).nested, translation, iter);
1078   }
1079   DCHECK_EQ(desc->GetSize(), index);
1080 }
1081 
1082 void CodeGenerator::BuildTranslationForFrameStateDescriptor(
1083     FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
1084     Translation* translation, OutputFrameStateCombine state_combine) {
1085   // Outer-most state must be added to translation first.
1086   if (descriptor->outer_state() != nullptr) {
1087     BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), iter,
1088                                             translation, state_combine);
1089   }
1090 
1091   Handle<SharedFunctionInfo> shared_info;
1092   if (!descriptor->shared_info().ToHandle(&shared_info)) {
1093     if (!info()->has_shared_info()) {
1094       return;  // Stub with no SharedFunctionInfo.
1095     }
1096     shared_info = info()->shared_info();
1097   }
1098 
1099   const BailoutId bailout_id = descriptor->bailout_id();
1100   const int shared_info_id =
1101       DefineDeoptimizationLiteral(DeoptimizationLiteral(shared_info));
1102   const unsigned int height =
1103       static_cast<unsigned int>(descriptor->GetHeight());
1104 
1105   switch (descriptor->type()) {
1106     case FrameStateType::kInterpretedFunction: {
1107       int return_offset = 0;
1108       int return_count = 0;
1109       if (!state_combine.IsOutputIgnored()) {
1110         return_offset = static_cast<int>(state_combine.GetOffsetToPokeAt());
1111         return_count = static_cast<int>(iter->instruction()->OutputCount());
1112       }
1113       translation->BeginInterpretedFrame(bailout_id, shared_info_id, height,
1114                                          return_offset, return_count);
1115       break;
1116     }
1117     case FrameStateType::kArgumentsAdaptor:
1118       translation->BeginArgumentsAdaptorFrame(shared_info_id, height);
1119       break;
1120     case FrameStateType::kConstructStub:
1121       DCHECK(bailout_id.IsValidForConstructStub());
1122       translation->BeginConstructStubFrame(bailout_id, shared_info_id, height);
1123       break;
1124     case FrameStateType::kBuiltinContinuation: {
1125       translation->BeginBuiltinContinuationFrame(bailout_id, shared_info_id,
1126                                                  height);
1127       break;
1128     }
1129     case FrameStateType::kJavaScriptBuiltinContinuation: {
1130       translation->BeginJavaScriptBuiltinContinuationFrame(
1131           bailout_id, shared_info_id, height);
1132       break;
1133     }
1134     case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: {
1135       translation->BeginJavaScriptBuiltinContinuationWithCatchFrame(
1136           bailout_id, shared_info_id, height);
1137       break;
1138     }
1139   }
1140 
1141   TranslateFrameStateDescriptorOperands(descriptor, iter, translation);
1142 }
1143 
1144 DeoptimizationExit* CodeGenerator::BuildTranslation(
1145     Instruction* instr, int pc_offset, size_t frame_state_offset,
1146     OutputFrameStateCombine state_combine) {
1147   DeoptimizationEntry const& entry =
1148       GetDeoptimizationEntry(instr, frame_state_offset);
1149   FrameStateDescriptor* const descriptor = entry.descriptor();
1150   frame_state_offset++;
1151 
1152   int update_feedback_count = entry.feedback().IsValid() ? 1 : 0;
1153   Translation translation(&translations_,
1154                           static_cast<int>(descriptor->GetFrameCount()),
1155                           static_cast<int>(descriptor->GetJSFrameCount()),
1156                           update_feedback_count, zone());
1157   if (entry.feedback().IsValid()) {
1158     DeoptimizationLiteral literal =
1159         DeoptimizationLiteral(entry.feedback().vector);
1160     int literal_id = DefineDeoptimizationLiteral(literal);
1161     translation.AddUpdateFeedback(literal_id, entry.feedback().slot.ToInt());
1162   }
1163   InstructionOperandIterator iter(instr, frame_state_offset);
1164   BuildTranslationForFrameStateDescriptor(descriptor, &iter, &translation,
1165                                           state_combine);
1166 
1167   DeoptimizationExit* const exit = zone()->New<DeoptimizationExit>(
1168       current_source_position_, descriptor->bailout_id(), translation.index(),
1169       pc_offset, entry.kind(), entry.reason());
1170 
1171   if (!Deoptimizer::kSupportsFixedDeoptExitSizes) {
1172     exit->set_deoptimization_id(next_deoptimization_id_++);
1173   }
1174 
1175   deoptimization_exits_.push_back(exit);
1176   return exit;
1177 }
1178 
1179 void CodeGenerator::AddTranslationForOperand(Translation* translation,
1180                                              Instruction* instr,
1181                                              InstructionOperand* op,
1182                                              MachineType type) {
1183   if (op->IsStackSlot()) {
1184     if (type.representation() == MachineRepresentation::kBit) {
1185       translation->StoreBoolStackSlot(LocationOperand::cast(op)->index());
1186     } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
1187                type == MachineType::Int32()) {
1188       translation->StoreInt32StackSlot(LocationOperand::cast(op)->index());
1189     } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
1190                type == MachineType::Uint32()) {
1191       translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
1192     } else if (type == MachineType::Int64()) {
1193       translation->StoreInt64StackSlot(LocationOperand::cast(op)->index());
1194     } else {
1195 #if defined(V8_COMPRESS_POINTERS)
1196       CHECK(MachineRepresentation::kTagged == type.representation() ||
1197             MachineRepresentation::kCompressed == type.representation());
1198 #else
1199       CHECK(MachineRepresentation::kTagged == type.representation());
1200 #endif
1201       translation->StoreStackSlot(LocationOperand::cast(op)->index());
1202     }
1203   } else if (op->IsFPStackSlot()) {
1204     if (type.representation() == MachineRepresentation::kFloat64) {
1205       translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
1206     } else {
1207       CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
1208       translation->StoreFloatStackSlot(LocationOperand::cast(op)->index());
1209     }
1210   } else if (op->IsRegister()) {
1211     InstructionOperandConverter converter(this, instr);
1212     if (type.representation() == MachineRepresentation::kBit) {
1213       translation->StoreBoolRegister(converter.ToRegister(op));
1214     } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
1215                type == MachineType::Int32()) {
1216       translation->StoreInt32Register(converter.ToRegister(op));
1217     } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
1218                type == MachineType::Uint32()) {
1219       translation->StoreUint32Register(converter.ToRegister(op));
1220     } else if (type == MachineType::Int64()) {
1221       translation->StoreInt64Register(converter.ToRegister(op));
1222     } else {
1223 #if defined(V8_COMPRESS_POINTERS)
1224       CHECK(MachineRepresentation::kTagged == type.representation() ||
1225             MachineRepresentation::kCompressed == type.representation());
1226 #else
1227       CHECK(MachineRepresentation::kTagged == type.representation());
1228 #endif
1229       translation->StoreRegister(converter.ToRegister(op));
1230     }
1231   } else if (op->IsFPRegister()) {
1232     InstructionOperandConverter converter(this, instr);
1233     if (type.representation() == MachineRepresentation::kFloat64) {
1234       translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
1235     } else {
1236       CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
1237       translation->StoreFloatRegister(converter.ToFloatRegister(op));
1238     }
1239   } else {
1240     CHECK(op->IsImmediate());
1241     InstructionOperandConverter converter(this, instr);
1242     Constant constant = converter.ToConstant(op);
1243     DeoptimizationLiteral literal;
1244     switch (constant.type()) {
1245       case Constant::kInt32:
1246         if (type.representation() == MachineRepresentation::kTagged) {
1247           // When pointers are 4 bytes, we can use int32 constants to represent
1248           // Smis.
1249           DCHECK_EQ(4, kSystemPointerSize);
1250           Smi smi(static_cast<Address>(constant.ToInt32()));
1251           DCHECK(smi.IsSmi());
1252           literal = DeoptimizationLiteral(smi.value());
1253         } else if (type.representation() == MachineRepresentation::kBit) {
1254           if (constant.ToInt32() == 0) {
1255             literal =
1256                 DeoptimizationLiteral(isolate()->factory()->false_value());
1257           } else {
1258             DCHECK_EQ(1, constant.ToInt32());
1259             literal = DeoptimizationLiteral(isolate()->factory()->true_value());
1260           }
1261         } else {
1262           DCHECK(type == MachineType::Int32() ||
1263                  type == MachineType::Uint32() ||
1264                  type.representation() == MachineRepresentation::kWord32 ||
1265                  type.representation() == MachineRepresentation::kNone);
1266           DCHECK(type.representation() != MachineRepresentation::kNone ||
1267                  constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);
1268           if (type == MachineType::Uint32()) {
1269             literal = DeoptimizationLiteral(
1270                 static_cast<uint32_t>(constant.ToInt32()));
1271           } else {
1272             literal = DeoptimizationLiteral(constant.ToInt32());
1273           }
1274         }
1275         break;
1276       case Constant::kInt64:
1277         DCHECK_EQ(8, kSystemPointerSize);
1278         if (type.representation() == MachineRepresentation::kWord64) {
1279           literal =
1280               DeoptimizationLiteral(static_cast<double>(constant.ToInt64()));
1281         } else {
1282           // When pointers are 8 bytes, we can use int64 constants to represent
1283           // Smis.
1284           DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
1285           Smi smi(static_cast<Address>(constant.ToInt64()));
1286           DCHECK(smi.IsSmi());
1287           literal = DeoptimizationLiteral(smi.value());
1288         }
1289         break;
1290       case Constant::kFloat32:
1291         DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
1292                type.representation() == MachineRepresentation::kTagged);
1293         literal = DeoptimizationLiteral(constant.ToFloat32());
1294         break;
1295       case Constant::kFloat64:
1296         DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
1297                type.representation() == MachineRepresentation::kTagged);
1298         literal = DeoptimizationLiteral(constant.ToFloat64().value());
1299         break;
1300       case Constant::kHeapObject:
1301         DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
1302         literal = DeoptimizationLiteral(constant.ToHeapObject());
1303         break;
1304       case Constant::kCompressedHeapObject:
1305         DCHECK_EQ(MachineType::AnyTagged(), type);
1306         literal = DeoptimizationLiteral(constant.ToHeapObject());
1307         break;
1308       case Constant::kDelayedStringConstant:
1309         DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
1310         literal = DeoptimizationLiteral(constant.ToDelayedStringConstant());
1311         break;
1312       default:
1313         UNREACHABLE();
1314     }
1315     if (literal.object().equals(info()->closure())) {
1316       translation->StoreJSFrameFunction();
1317     } else {
1318       int literal_id = DefineDeoptimizationLiteral(literal);
1319       translation->StoreLiteral(literal_id);
1320     }
1321   }
1322 }
1323 
1324 void CodeGenerator::MarkLazyDeoptSite() {
1325   last_lazy_deopt_pc_ = tasm()->pc_offset();
1326 }
1327 
1328 DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
1329     Instruction* instr, size_t frame_state_offset) {
1330   return BuildTranslation(instr, -1, frame_state_offset,
1331                           OutputFrameStateCombine::Ignore());
1332 }
1333 
1334 void CodeGenerator::InitializeSpeculationPoison() {
1335   if (poisoning_level_ == PoisoningMitigationLevel::kDontPoison) return;
1336 
1337   // Initialize {kSpeculationPoisonRegister} either by comparing the expected
1338   // with the actual call target, or by unconditionally using {-1} initially.
1339   // Masking register arguments with it only makes sense in the first case.
1340   if (info()->called_with_code_start_register()) {
1341     tasm()->RecordComment("-- Prologue: generate speculation poison --");
1342     GenerateSpeculationPoisonFromCodeStartRegister();
1343     if (info()->poison_register_arguments()) {
1344       AssembleRegisterArgumentPoisoning();
1345     }
1346   } else {
1347     ResetSpeculationPoison();
1348   }
1349 }
1350 
1351 void CodeGenerator::ResetSpeculationPoison() {
1352   if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
1353     tasm()->ResetSpeculationPoisonRegister();
1354   }
1355 }
1356 
1357 OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
1358     : frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
1359   gen->ools_ = this;
1360 }
1361 
1362 OutOfLineCode::~OutOfLineCode() = default;
1363 
1364 Handle<Object> DeoptimizationLiteral::Reify(Isolate* isolate) const {
1365   Validate();
1366   switch (kind_) {
1367     case DeoptimizationLiteralKind::kObject: {
1368       return object_;
1369     }
1370     case DeoptimizationLiteralKind::kNumber: {
1371       return isolate->factory()->NewNumber(number_);
1372     }
1373     case DeoptimizationLiteralKind::kString: {
1374       return string_->AllocateStringConstant(isolate);
1375     }
1376     case DeoptimizationLiteralKind::kInvalid: {
1377       UNREACHABLE();
1378     }
1379   }
1380   UNREACHABLE();
1381 }
1382 
1383 }  // namespace compiler
1384 }  // namespace internal
1385 }  // namespace v8
1386