1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/backend/code-generator.h"
6 
7 #include "src/base/iterator.h"
8 #include "src/codegen/assembler-inl.h"
9 #include "src/codegen/macro-assembler-inl.h"
10 #include "src/codegen/optimized-compilation-info.h"
11 #include "src/codegen/string-constants.h"
12 #include "src/compiler/backend/code-generator-impl.h"
13 #include "src/compiler/globals.h"
14 #include "src/compiler/linkage.h"
15 #include "src/compiler/pipeline.h"
16 #include "src/diagnostics/eh-frame.h"
17 #include "src/execution/frames.h"
18 #include "src/logging/counters.h"
19 #include "src/logging/log.h"
20 #include "src/objects/smi.h"
21 #include "src/utils/address-map.h"
22 
23 namespace v8 {
24 namespace internal {
25 namespace compiler {
26 
27 class CodeGenerator::JumpTable final : public ZoneObject {
28  public:
29   JumpTable(JumpTable* next, Label** targets, size_t target_count)
30       : next_(next), targets_(targets), target_count_(target_count) {}
31 
32   Label* label() { return &label_; }
33   JumpTable* next() const { return next_; }
34   Label** targets() const { return targets_; }
35   size_t target_count() const { return target_count_; }
36 
37  private:
38   Label label_;
39   JumpTable* const next_;
40   Label** const targets_;
41   size_t const target_count_;
42 };
43 
44 CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
45                              InstructionSequence* instructions,
46                              OptimizedCompilationInfo* info, Isolate* isolate,
47                              base::Optional<OsrHelper> osr_helper,
48                              int start_source_position,
49                              JumpOptimizationInfo* jump_opt,
50                              const AssemblerOptions& options, Builtin builtin,
51                              size_t max_unoptimized_frame_height,
52                              size_t max_pushed_argument_count,
53                              const char* debug_name)
54     : zone_(codegen_zone),
55       isolate_(isolate),
56       frame_access_state_(nullptr),
57       linkage_(linkage),
58       instructions_(instructions),
59       unwinding_info_writer_(codegen_zone),
60       info_(info),
61       labels_(
62           codegen_zone->NewArray<Label>(instructions->InstructionBlockCount())),
63       current_block_(RpoNumber::Invalid()),
64       start_source_position_(start_source_position),
65       current_source_position_(SourcePosition::Unknown()),
66       tasm_(isolate, options, CodeObjectRequired::kNo),
67       resolver_(this),
68       safepoints_(codegen_zone),
69       handlers_(codegen_zone),
70       deoptimization_exits_(codegen_zone),
71       deoptimization_literals_(codegen_zone),
72       translations_(codegen_zone),
73       max_unoptimized_frame_height_(max_unoptimized_frame_height),
74       max_pushed_argument_count_(max_pushed_argument_count),
75       caller_registers_saved_(false),
76       jump_tables_(nullptr),
77       ools_(nullptr),
78       osr_helper_(std::move(osr_helper)),
79       osr_pc_offset_(-1),
80       optimized_out_literal_id_(-1),
81       source_position_table_builder_(
82           codegen_zone, SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
83       protected_instructions_(codegen_zone),
84       result_(kSuccess),
85       block_starts_(codegen_zone),
86       instr_starts_(codegen_zone),
87       debug_name_(debug_name) {
88   for (int i = 0; i < instructions->InstructionBlockCount(); ++i) {
89     new (&labels_[i]) Label;
90   }
91   CreateFrameAccessState(frame);
92   CHECK_EQ(info->is_osr(), osr_helper_.has_value());
93   tasm_.set_jump_optimization_info(jump_opt);
94   CodeKind code_kind = info->code_kind();
95   if (code_kind == CodeKind::WASM_FUNCTION ||
96       code_kind == CodeKind::WASM_TO_CAPI_FUNCTION ||
97       code_kind == CodeKind::WASM_TO_JS_FUNCTION ||
98       code_kind == CodeKind::JS_TO_WASM_FUNCTION) {
99     tasm_.set_abort_hard(true);
100   }
101   tasm_.set_builtin(builtin);
102 }
103 
104 bool CodeGenerator::wasm_runtime_exception_support() const {
105   DCHECK_NOT_NULL(info_);
106   return info_->wasm_runtime_exception_support();
107 }
108 
109 void CodeGenerator::AddProtectedInstructionLanding(uint32_t instr_offset,
110                                                    uint32_t landing_offset) {
111   protected_instructions_.push_back({instr_offset, landing_offset});
112 }
113 
114 void CodeGenerator::CreateFrameAccessState(Frame* frame) {
115   FinishFrame(frame);
116   frame_access_state_ = zone()->New<FrameAccessState>(frame);
117 }
118 
119 bool CodeGenerator::ShouldApplyOffsetToStackCheck(Instruction* instr,
120                                                   uint32_t* offset) {
121   DCHECK_EQ(instr->arch_opcode(), kArchStackPointerGreaterThan);
122 
123   StackCheckKind kind =
124       static_cast<StackCheckKind>(MiscField::decode(instr->opcode()));
125   if (kind != StackCheckKind::kJSFunctionEntry) return false;
126 
127   uint32_t stack_check_offset = *offset = GetStackCheckOffset();
128   return stack_check_offset > kStackLimitSlackForDeoptimizationInBytes;
129 }
130 
131 uint32_t CodeGenerator::GetStackCheckOffset() {
132   if (!frame_access_state()->has_frame()) {
133     DCHECK_EQ(max_unoptimized_frame_height_, 0);
134     DCHECK_EQ(max_pushed_argument_count_, 0);
135     return 0;
136   }
137 
138   int32_t optimized_frame_height =
139       frame()->GetTotalFrameSlotCount() * kSystemPointerSize;
140   DCHECK(is_int32(max_unoptimized_frame_height_));
141   int32_t signed_max_unoptimized_frame_height =
142       static_cast<int32_t>(max_unoptimized_frame_height_);
143 
144   // The offset is either the delta between the optimized frames and the
145   // interpreted frame, or the maximal number of bytes pushed to the stack
146   // while preparing for function calls, whichever is bigger.
147   uint32_t frame_height_delta = static_cast<uint32_t>(std::max(
148       signed_max_unoptimized_frame_height - optimized_frame_height, 0));
149   uint32_t max_pushed_argument_bytes =
150       static_cast<uint32_t>(max_pushed_argument_count_ * kSystemPointerSize);
151   return std::max(frame_height_delta, max_pushed_argument_bytes);
152 }
153 
154 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
155     DeoptimizationExit* exit) {
156   int deoptimization_id = exit->deoptimization_id();
157   if (deoptimization_id > Deoptimizer::kMaxNumberOfEntries) {
158     return kTooManyDeoptimizationBailouts;
159   }
160 
161   DeoptimizeKind deopt_kind = exit->kind();
162   DeoptimizeReason deoptimization_reason = exit->reason();
163   Label* jump_deoptimization_entry_label =
164       &jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
165   if (info()->source_positions()) {
166     tasm()->RecordDeoptReason(deoptimization_reason, exit->node_id(),
167                               exit->pos(), deoptimization_id);
168   }
169 
170   if (deopt_kind == DeoptimizeKind::kLazy) {
171     ++lazy_deopt_count_;
172     tasm()->BindExceptionHandler(exit->label());
173   } else {
174     ++eager_deopt_count_;
175     tasm()->bind(exit->label());
176   }
177   Builtin target = Deoptimizer::GetDeoptimizationEntry(deopt_kind);
178   tasm()->CallForDeoptimization(target, deoptimization_id, exit->label(),
179                                 deopt_kind, exit->continue_label(),
180                                 jump_deoptimization_entry_label);
181 
182   exit->set_emitted();
183 
184   return kSuccess;
185 }
186 
187 void CodeGenerator::MaybeEmitOutOfLineConstantPool() {
188   tasm()->MaybeEmitOutOfLineConstantPool();
189 }
190 
191 void CodeGenerator::AssembleCode() {
192   OptimizedCompilationInfo* info = this->info();
193 
194   // Open a frame scope to indicate that there is a frame on the stack.  The
195   // MANUAL indicates that the scope shouldn't actually generate code to set up
196   // the frame (that is done in AssemblePrologue).
197   FrameScope frame_scope(tasm(), StackFrame::MANUAL);
198 
199   if (info->source_positions()) {
200     AssembleSourcePosition(start_source_position());
201   }
202   offsets_info_.code_start_register_check = tasm()->pc_offset();
203 
204   tasm()->CodeEntry();
205 
206   // Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
207   if (FLAG_debug_code && info->called_with_code_start_register()) {
208     tasm()->RecordComment("-- Prologue: check code start register --");
209     AssembleCodeStartRegisterCheck();
210   }
211 
212   offsets_info_.deopt_check = tasm()->pc_offset();
213   // We want to bail out only from JS functions, which are the only ones
214   // that are optimized.
215   if (info->IsOptimizing()) {
216     DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
217     tasm()->RecordComment("-- Prologue: check for deoptimization --");
218     BailoutIfDeoptimized();
219   }
220 
221   // Define deoptimization literals for all inlined functions.
222   DCHECK_EQ(0u, deoptimization_literals_.size());
223   for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
224        info->inlined_functions()) {
225     if (!inlined.shared_info.equals(info->shared_info())) {
226       int index = DefineDeoptimizationLiteral(
227           DeoptimizationLiteral(inlined.shared_info));
228       inlined.RegisterInlinedFunctionId(index);
229     }
230   }
231   inlined_function_count_ = deoptimization_literals_.size();
232 
233   // Define deoptimization literals for all BytecodeArrays to which we might
234   // deopt to ensure they are strongly held by the optimized code.
235   if (info->has_bytecode_array()) {
236     DefineDeoptimizationLiteral(DeoptimizationLiteral(info->bytecode_array()));
237   }
238   for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
239        info->inlined_functions()) {
240     DefineDeoptimizationLiteral(DeoptimizationLiteral(inlined.bytecode_array));
241   }
242 
243   unwinding_info_writer_.SetNumberOfInstructionBlocks(
244       instructions()->InstructionBlockCount());
245 
246   if (info->trace_turbo_json()) {
247     block_starts_.assign(instructions()->instruction_blocks().size(), -1);
248     instr_starts_.assign(instructions()->instructions().size(), {});
249   }
250   // Assemble instructions in assembly order.
251   offsets_info_.blocks_start = tasm()->pc_offset();
252   for (const InstructionBlock* block : instructions()->ao_blocks()) {
253     // Align loop headers on vendor recommended boundaries.
254     if (!tasm()->jump_optimization_info()) {
255       if (block->ShouldAlignLoopHeader()) {
256         tasm()->LoopHeaderAlign();
257       } else if (block->ShouldAlignCodeTarget()) {
258         tasm()->CodeTargetAlign();
259       }
260     }
261     if (info->trace_turbo_json()) {
262       block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset();
263     }
264     // Bind a label for a block.
265     current_block_ = block->rpo_number();
266     unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block);
267     if (FLAG_code_comments) {
268       std::ostringstream buffer;
269       buffer << "-- B" << block->rpo_number().ToInt() << " start";
270       if (block->IsDeferred()) buffer << " (deferred)";
271       if (!block->needs_frame()) buffer << " (no frame)";
272       if (block->must_construct_frame()) buffer << " (construct frame)";
273       if (block->must_deconstruct_frame()) buffer << " (deconstruct frame)";
274 
275       if (block->IsLoopHeader()) {
276         buffer << " (loop up to " << block->loop_end().ToInt() << ")";
277       }
278       if (block->loop_header().IsValid()) {
279         buffer << " (in loop " << block->loop_header().ToInt() << ")";
280       }
281       buffer << " --";
282       tasm()->RecordComment(buffer.str().c_str());
283     }
284 
285     frame_access_state()->MarkHasFrame(block->needs_frame());
286 
287     tasm()->bind(GetLabel(current_block_));
288 
289     if (block->must_construct_frame()) {
290       AssembleConstructFrame();
289       // We need to set up the root register after we assemble the prologue, to
290       // avoid clobbering callee-saved registers in case of C linkage and
293       // using the roots.
294       // TODO(mtrofin): investigate how we can avoid doing this repeatedly.
295       if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
296         tasm()->InitializeRootRegister();
297       }
298     }
299 
300     if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
301       ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
302       result_ = AssembleBlock(block);
303     } else {
304       result_ = AssembleBlock(block);
305     }
306     if (result_ != kSuccess) return;
307     unwinding_info_writer_.EndInstructionBlock(block);
308   }
309 
310   // Assemble all out-of-line code.
311   offsets_info_.out_of_line_code = tasm()->pc_offset();
312   if (ools_) {
313     tasm()->RecordComment("-- Out of line code --");
314     for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
315       tasm()->bind(ool->entry());
316       ool->Generate();
317       if (ool->exit()->is_bound()) tasm()->jmp(ool->exit());
318     }
319   }
320 
321   // This nop operation is needed to ensure that the trampoline is not
322   // confused with the pc of the call before deoptimization.
323   // The test regress/regress-259 is an example of where we need it.
324   tasm()->nop();
325 
326   // For some targets, we must make sure that constant and veneer pools are
327   // emitted before emitting the deoptimization exits.
328   PrepareForDeoptimizationExits(&deoptimization_exits_);
329 
330   deopt_exit_start_offset_ = tasm()->pc_offset();
331 
332   // Assemble deoptimization exits.
333   offsets_info_.deoptimization_exits = tasm()->pc_offset();
334   int last_updated = 0;
335   // We sort the deoptimization exits here so that the lazy ones will be visited
336   // last. We need this as lazy deopts might need additional instructions.
337   auto cmp = [](const DeoptimizationExit* a, const DeoptimizationExit* b) {
338     // The deoptimization exits are sorted so that lazy deopt exits appear after
339     // eager deopts.
340     static_assert(static_cast<int>(DeoptimizeKind::kLazy) ==
341                       static_cast<int>(kLastDeoptimizeKind),
342                   "lazy deopts are expected to be emitted last");
343     if (a->kind() != b->kind()) {
344       return a->kind() < b->kind();
345     }
346     return a->pc_offset() < b->pc_offset();
347   };
348   std::sort(deoptimization_exits_.begin(), deoptimization_exits_.end(), cmp);
349 
350   {
351 #ifdef V8_TARGET_ARCH_PPC64
352     v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
353         tasm());
354 #endif
355     for (DeoptimizationExit* exit : deoptimization_exits_) {
356       if (exit->emitted()) continue;
357       exit->set_deoptimization_id(next_deoptimization_id_++);
358       result_ = AssembleDeoptimizerCall(exit);
359       if (result_ != kSuccess) return;
360 
361       // UpdateDeoptimizationInfo expects lazy deopts to be visited in pc_offset
362       // order, which is always the case since they are added to
363       // deoptimization_exits_ in that order, and the optional sort operation
364       // above preserves that order.
365       if (exit->kind() == DeoptimizeKind::kLazy) {
366         int trampoline_pc = exit->label()->pos();
367         last_updated = safepoints()->UpdateDeoptimizationInfo(
368             exit->pc_offset(), trampoline_pc, last_updated,
369             exit->deoptimization_id());
370       }
371     }
372   }
373 
374   offsets_info_.pools = tasm()->pc_offset();
375   // TODO(jgruber): Move all inlined metadata generation into a new,
376   // architecture-independent version of FinishCode. Currently, this includes
377   // the safepoint table, handler table, constant pool, and code comments, in
378   // that order.
379   FinishCode();
380 
381   offsets_info_.jump_tables = tasm()->pc_offset();
382   // Emit the jump tables.
383   if (jump_tables_) {
384     tasm()->Align(kSystemPointerSize);
385     for (JumpTable* table = jump_tables_; table; table = table->next()) {
386       tasm()->bind(table->label());
387       AssembleJumpTable(table->targets(), table->target_count());
388     }
389   }
390 
391   // The PerfJitLogger logs code up until here, excluding the safepoint
392   // table. Resolve the unwinding info now so it is aware of the same code
393   // size as reported by perf.
394   unwinding_info_writer_.Finish(tasm()->pc_offset());
395 
396   // Final alignment before starting on the metadata section.
397   tasm()->Align(Code::kMetadataAlignment);
398 
399   safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
400 
401   // Emit the exception handler table.
402   if (!handlers_.empty()) {
403     handler_table_offset_ = HandlerTable::EmitReturnTableStart(tasm());
404     for (size_t i = 0; i < handlers_.size(); ++i) {
405       HandlerTable::EmitReturnEntry(tasm(), handlers_[i].pc_offset,
406                                     handlers_[i].handler->pos());
407     }
408   }
409 
410   tasm()->MaybeEmitOutOfLineConstantPool();
411   tasm()->FinalizeJumpOptimizationInfo();
412 
413   result_ = kSuccess;
414 }
415 
416 void CodeGenerator::AssembleArchBinarySearchSwitchRange(
417     Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
418     std::pair<int32_t, Label*>* end) {
419   if (end - begin < kBinarySearchSwitchMinimalCases) {
420     while (begin != end) {
421       tasm()->JumpIfEqual(input, begin->first, begin->second);
422       ++begin;
423     }
424     AssembleArchJumpRegardlessOfAssemblyOrder(def_block);
425     return;
426   }
427   auto middle = begin + (end - begin) / 2;
428   Label less_label;
429   tasm()->JumpIfLessThan(input, middle->first, &less_label);
430   AssembleArchBinarySearchSwitchRange(input, def_block, middle, end);
431   tasm()->bind(&less_label);
432   AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle);
433 }
434 
435 void CodeGenerator::AssembleArchJump(RpoNumber target) {
436   if (!IsNextInAssemblyOrder(target))
437     AssembleArchJumpRegardlessOfAssemblyOrder(target);
438 }
439 
440 base::OwnedVector<byte> CodeGenerator::GetSourcePositionTable() {
441   return source_position_table_builder_.ToSourcePositionTableVector();
442 }
443 
444 base::OwnedVector<byte> CodeGenerator::GetProtectedInstructionsData() {
445   return base::OwnedVector<byte>::Of(
446       base::Vector<byte>::cast(base::VectorOf(protected_instructions_)));
447 }
448 
449 MaybeHandle<Code> CodeGenerator::FinalizeCode() {
450   if (result_ != kSuccess) {
451     tasm()->AbortedCodeGeneration();
452     return MaybeHandle<Code>();
453   }
454 
455   // Allocate the source position table.
456   Handle<ByteArray> source_positions =
457       source_position_table_builder_.ToSourcePositionTable(isolate());
458 
459   // Allocate deoptimization data.
460   Handle<DeoptimizationData> deopt_data = GenerateDeoptimizationData();
461 
462   // Allocate and install the code.
463   CodeDesc desc;
464   tasm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_);
465 
466 #if defined(V8_OS_WIN64)
467   if (Builtins::IsBuiltinId(info_->builtin())) {
468     isolate_->SetBuiltinUnwindData(info_->builtin(), tasm()->GetUnwindInfo());
469   }
470 #endif  // V8_OS_WIN64
471 
472   if (unwinding_info_writer_.eh_frame_writer()) {
473     unwinding_info_writer_.eh_frame_writer()->GetEhFrame(&desc);
474   }
475 
476   MaybeHandle<Code> maybe_code =
477       Factory::CodeBuilder(isolate(), desc, info()->code_kind())
478           .set_builtin(info()->builtin())
479           .set_inlined_bytecode_size(info()->inlined_bytecode_size())
480           .set_source_position_table(source_positions)
481           .set_deoptimization_data(deopt_data)
482           .set_is_turbofanned()
483           .set_stack_slots(frame()->GetTotalFrameSlotCount())
484           .set_profiler_data(info()->profiler_data())
485           .TryBuild();
486 
487   Handle<Code> code;
488   if (!maybe_code.ToHandle(&code)) {
489     tasm()->AbortedCodeGeneration();
490     return MaybeHandle<Code>();
491   }
492 
493   // Counts both compiled code and metadata.
494   isolate()->counters()->total_compiled_code_size()->Increment(
495       code->raw_body_size());
496 
497   LOG_CODE_EVENT(isolate(), CodeLinePosInfoRecordEvent(
498                                 code->raw_instruction_start(),
499                                 *source_positions, JitCodeEvent::JIT_CODE));
500 
501   return code;
502 }
503 
504 bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
505   return instructions()
506       ->InstructionBlockAt(current_block_)
507       ->ao_number()
508       .IsNext(instructions()->InstructionBlockAt(block)->ao_number());
509 }
510 
511 void CodeGenerator::RecordSafepoint(ReferenceMap* references) {
512   auto safepoint = safepoints()->DefineSafepoint(tasm());
513   int frame_header_offset = frame()->GetFixedSlotCount();
514   for (const InstructionOperand& operand : references->reference_operands()) {
515     if (operand.IsStackSlot()) {
516       int index = LocationOperand::cast(operand).index();
517       DCHECK_LE(0, index);
518       // We might index values in the fixed part of the frame (i.e. the
519       // closure pointer or the context pointer); these are not spill slots
520       // and therefore don't work with the SafepointTable currently, but
521       // we also don't need to worry about them, since the GC has special
522       // knowledge about those fields anyway.
523       if (index < frame_header_offset) continue;
524       safepoint.DefineTaggedStackSlot(index);
525     }
526   }
527 }
528 
529 bool CodeGenerator::IsMaterializableFromRoot(Handle<HeapObject> object,
530                                              RootIndex* index_return) {
531   const CallDescriptor* incoming_descriptor =
532       linkage()->GetIncomingDescriptor();
533   if (incoming_descriptor->flags() & CallDescriptor::kCanUseRoots) {
534     return isolate()->roots_table().IsRootHandle(object, index_return) &&
535            RootsTable::IsImmortalImmovable(*index_return);
536   }
537   return false;
538 }
539 
540 CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
541     const InstructionBlock* block) {
542   if (block->IsHandler()) {
543     tasm()->ExceptionHandler();
544   }
545   for (int i = block->code_start(); i < block->code_end(); ++i) {
546     CodeGenResult result = AssembleInstruction(i, block);
547     if (result != kSuccess) return result;
548   }
549   return kSuccess;
550 }
551 
552 bool CodeGenerator::IsValidPush(InstructionOperand source,
553                                 CodeGenerator::PushTypeFlags push_type) {
554   if (source.IsImmediate() &&
555       ((push_type & CodeGenerator::kImmediatePush) != 0)) {
556     return true;
557   }
558   if (source.IsRegister() &&
559       ((push_type & CodeGenerator::kRegisterPush) != 0)) {
560     return true;
561   }
562   if (source.IsStackSlot() &&
563       ((push_type & CodeGenerator::kStackSlotPush) != 0)) {
564     return true;
565   }
566   return false;
567 }
568 
569 void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
570                                            PushTypeFlags push_type,
571                                            ZoneVector<MoveOperands*>* pushes) {
572   static constexpr int first_push_compatible_index =
573       kReturnAddressStackSlotCount;
574   pushes->clear();
575   for (int i = Instruction::FIRST_GAP_POSITION;
576        i <= Instruction::LAST_GAP_POSITION; ++i) {
577     Instruction::GapPosition inner_pos =
578         static_cast<Instruction::GapPosition>(i);
579     ParallelMove* parallel_move = instr->GetParallelMove(inner_pos);
580     if (parallel_move != nullptr) {
581       for (auto move : *parallel_move) {
582         InstructionOperand source = move->source();
583         InstructionOperand destination = move->destination();
584         // If there are any moves from slots that will be overridden by pushes,
585         // then the full gap resolver must be used, since the push optimization
586         // doesn't participate in the parallel move and might clobber
587         // values needed for the gap resolve.
588         if (source.IsAnyStackSlot() && LocationOperand::cast(source).index() >=
589                                            first_push_compatible_index) {
590           pushes->clear();
591           return;
592         }
593         // TODO(danno): Right now, only consider moves from the FIRST gap for
594         // pushes. Theoretically, we could extract pushes for both gaps (there
595         // are cases where this happens), but the logic for that would also have
596         // to check to make sure that non-memory inputs to the pushes from the
597         // LAST gap don't get clobbered in the FIRST gap.
598         if (i == Instruction::FIRST_GAP_POSITION) {
599           if (destination.IsStackSlot() &&
600               LocationOperand::cast(destination).index() >=
601                   first_push_compatible_index) {
602             int index = LocationOperand::cast(destination).index();
603             if (IsValidPush(source, push_type)) {
604               if (index >= static_cast<int>(pushes->size())) {
605                 pushes->resize(index + 1);
606               }
607               (*pushes)[index] = move;
608             }
609           }
610         }
611       }
612     }
613   }
614 
615   // For now, only support a set of contiguous pushes at the end of the list.
616   size_t push_count_upper_bound = pushes->size();
617   size_t push_begin = push_count_upper_bound;
618   for (auto move : base::Reversed(*pushes)) {
619     if (move == nullptr) break;
620     push_begin--;
621   }
622   size_t push_count = pushes->size() - push_begin;
623   std::copy(pushes->begin() + push_begin,
624             pushes->begin() + push_begin + push_count, pushes->begin());
625   pushes->resize(push_count);
626 }
627 
628 CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferMove(
629     InstructionOperand* source, InstructionOperand* destination) {
630   if (source->IsConstant()) {
631     if (destination->IsAnyRegister()) {
632       return MoveType::kConstantToRegister;
633     } else {
634       DCHECK(destination->IsAnyStackSlot());
635       return MoveType::kConstantToStack;
636     }
637   }
638   DCHECK(LocationOperand::cast(source)->IsCompatible(
639       LocationOperand::cast(destination)));
640   if (source->IsAnyRegister()) {
641     if (destination->IsAnyRegister()) {
642       return MoveType::kRegisterToRegister;
643     } else {
644       DCHECK(destination->IsAnyStackSlot());
645       return MoveType::kRegisterToStack;
646     }
647   } else {
648     DCHECK(source->IsAnyStackSlot());
649     if (destination->IsAnyRegister()) {
650       return MoveType::kStackToRegister;
651     } else {
652       DCHECK(destination->IsAnyStackSlot());
653       return MoveType::kStackToStack;
654     }
655   }
656 }
657 
658 CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferSwap(
659     InstructionOperand* source, InstructionOperand* destination) {
660   DCHECK(LocationOperand::cast(source)->IsCompatible(
661       LocationOperand::cast(destination)));
662   if (source->IsAnyRegister()) {
663     if (destination->IsAnyRegister()) {
664       return MoveType::kRegisterToRegister;
665     } else {
666       DCHECK(destination->IsAnyStackSlot());
667       return MoveType::kRegisterToStack;
668     }
669   } else {
670     DCHECK(source->IsAnyStackSlot());
671     DCHECK(destination->IsAnyStackSlot());
672     return MoveType::kStackToStack;
673   }
674 }
675 
676 RpoNumber CodeGenerator::ComputeBranchInfo(BranchInfo* branch,
677                                            Instruction* instr) {
678   // Assemble a branch after this instruction.
679   InstructionOperandConverter i(this, instr);
680   RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
681   RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
682 
683   if (true_rpo == false_rpo) {
684     return true_rpo;
685   }
686   FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
687   if (IsNextInAssemblyOrder(true_rpo)) {
688     // true block is next, can fall through if condition negated.
689     std::swap(true_rpo, false_rpo);
690     condition = NegateFlagsCondition(condition);
691   }
692   branch->condition = condition;
693   branch->true_label = GetLabel(true_rpo);
694   branch->false_label = GetLabel(false_rpo);
695   branch->fallthru = IsNextInAssemblyOrder(false_rpo);
696   return RpoNumber::Invalid();
697 }
698 
699 CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
700     int instruction_index, const InstructionBlock* block) {
701   Instruction* instr = instructions()->InstructionAt(instruction_index);
702   if (info()->trace_turbo_json()) {
703     instr_starts_[instruction_index].gap_pc_offset = tasm()->pc_offset();
704   }
705   int first_unused_stack_slot;
706   FlagsMode mode = FlagsModeField::decode(instr->opcode());
707   if (mode != kFlags_trap) {
708     AssembleSourcePosition(instr);
709   }
710   bool adjust_stack =
711       GetSlotAboveSPBeforeTailCall(instr, &first_unused_stack_slot);
712   if (adjust_stack) AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
713   AssembleGaps(instr);
714   if (adjust_stack) AssembleTailCallAfterGap(instr, first_unused_stack_slot);
715   DCHECK_IMPLIES(
716       block->must_deconstruct_frame(),
717       instr != instructions()->InstructionAt(block->last_instruction_index()) ||
718           instr->IsRet() || instr->IsJump());
719   if (instr->IsJump() && block->must_deconstruct_frame()) {
720     AssembleDeconstructFrame();
721   }
722   if (info()->trace_turbo_json()) {
723     instr_starts_[instruction_index].arch_instr_pc_offset = tasm()->pc_offset();
724   }
725   // Assemble architecture-specific code for the instruction.
726   CodeGenResult result = AssembleArchInstruction(instr);
727   if (result != kSuccess) return result;
728 
729   if (info()->trace_turbo_json()) {
730     instr_starts_[instruction_index].condition_pc_offset = tasm()->pc_offset();
731   }
732 
733   FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
734   switch (mode) {
735     case kFlags_branch: {
736       BranchInfo branch;
737       RpoNumber target = ComputeBranchInfo(&branch, instr);
738       if (target.IsValid()) {
739         // redundant branch.
740         if (!IsNextInAssemblyOrder(target)) {
741           AssembleArchJump(target);
742         }
743         return kSuccess;
744       }
745       // Assemble architecture-specific branch.
746       AssembleArchBranch(instr, &branch);
747       break;
748     }
749     case kFlags_deoptimize: {
750       // Assemble a conditional eager deoptimization after this instruction.
751       InstructionOperandConverter i(this, instr);
752       size_t frame_state_offset =
753           DeoptFrameStateOffsetField::decode(instr->opcode());
754       size_t immediate_args_count =
755           DeoptImmedArgsCountField::decode(instr->opcode());
756       DeoptimizationExit* const exit = AddDeoptimizationExit(
757           instr, frame_state_offset, immediate_args_count);
758       BranchInfo branch;
759       branch.condition = condition;
760       branch.true_label = exit->label();
761       branch.false_label = exit->continue_label();
762       branch.fallthru = true;
763       AssembleArchDeoptBranch(instr, &branch);
764       tasm()->bind(exit->continue_label());
765       break;
766     }
767     case kFlags_set: {
768       // Assemble a boolean materialization after this instruction.
769       AssembleArchBoolean(instr, condition);
770       break;
771     }
772     case kFlags_select: {
773       AssembleArchSelect(instr, condition);
774       break;
775     }
776     case kFlags_trap: {
777 #if V8_ENABLE_WEBASSEMBLY
778       AssembleArchTrap(instr, condition);
779       break;
780 #else
781       UNREACHABLE();
782 #endif  // V8_ENABLE_WEBASSEMBLY
783     }
784     case kFlags_none: {
785       break;
786     }
787   }
788 
789   return kSuccess;
790 }
791 
792 void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
793   SourcePosition source_position = SourcePosition::Unknown();
794   if (instr->IsNop() && instr->AreMovesRedundant()) return;
795   if (!instructions()->GetSourcePosition(instr, &source_position)) return;
796   AssembleSourcePosition(source_position);
797 }
798 
799 void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
800   if (source_position == current_source_position_) return;
801   current_source_position_ = source_position;
802   if (!source_position.IsKnown()) return;
803   source_position_table_builder_.AddPosition(tasm()->pc_offset(),
804                                              source_position, false);
805   if (FLAG_code_comments) {
806     OptimizedCompilationInfo* info = this->info();
807     if (!info->IsOptimizing()) {
808 #if V8_ENABLE_WEBASSEMBLY
809       if (!info->IsWasm()) return;
810 #else
811       return;
812 #endif  // V8_ENABLE_WEBASSEMBLY
813     }
814     std::ostringstream buffer;
815     buffer << "-- ";
816     // Turbolizer only needs the source position, as it can reconstruct
817     // the inlining stack from other information.
818     if (info->trace_turbo_json() || !tasm()->isolate() ||
819         tasm()->isolate()->concurrent_recompilation_enabled()) {
820       buffer << source_position;
821     } else {
822       AllowGarbageCollection allocation;
823       AllowHandleAllocation handles;
824       AllowHandleDereference deref;
825       buffer << source_position.InliningStack(info);
826     }
827     buffer << " --";
828     tasm()->RecordComment(buffer.str().c_str());
829   }
830 }
831 
832 bool CodeGenerator::GetSlotAboveSPBeforeTailCall(Instruction* instr,
833                                                  int* slot) {
834   if (instr->IsTailCall()) {
835     InstructionOperandConverter g(this, instr);
836     *slot = g.InputInt32(instr->InputCount() - 1);
837     return true;
838   } else {
839     return false;
840   }
841 }
842 
843 StubCallMode CodeGenerator::DetermineStubCallMode() const {
844 #if V8_ENABLE_WEBASSEMBLY
845   CodeKind code_kind = info()->code_kind();
846   if (code_kind == CodeKind::WASM_FUNCTION ||
847       code_kind == CodeKind::WASM_TO_CAPI_FUNCTION ||
848       code_kind == CodeKind::WASM_TO_JS_FUNCTION) {
849     return StubCallMode::kCallWasmRuntimeStub;
850   }
851 #endif  // V8_ENABLE_WEBASSEMBLY
852   return StubCallMode::kCallCodeObject;
853 }
854 
855 void CodeGenerator::AssembleGaps(Instruction* instr) {
856   for (int i = Instruction::FIRST_GAP_POSITION;
857        i <= Instruction::LAST_GAP_POSITION; i++) {
858     Instruction::GapPosition inner_pos =
859         static_cast<Instruction::GapPosition>(i);
860     ParallelMove* move = instr->GetParallelMove(inner_pos);
861     if (move != nullptr) resolver()->Resolve(move);
862   }
863 }
864 
865 namespace {
866 
867 Handle<PodArray<InliningPosition>> CreateInliningPositions(
868     OptimizedCompilationInfo* info, Isolate* isolate) {
869   const OptimizedCompilationInfo::InlinedFunctionList& inlined_functions =
870       info->inlined_functions();
871   Handle<PodArray<InliningPosition>> inl_positions =
872       PodArray<InliningPosition>::New(
873           isolate, static_cast<int>(inlined_functions.size()),
874           AllocationType::kOld);
875   for (size_t i = 0; i < inlined_functions.size(); ++i) {
876     inl_positions->set(static_cast<int>(i), inlined_functions[i].position);
877   }
878   return inl_positions;
879 }
880 
881 }  // namespace
882 
883 Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
884   OptimizedCompilationInfo* info = this->info();
885   int deopt_count = static_cast<int>(deoptimization_exits_.size());
886   if (deopt_count == 0 && !info->is_osr()) {
887     return DeoptimizationData::Empty(isolate());
888   }
889   Handle<DeoptimizationData> data =
890       DeoptimizationData::New(isolate(), deopt_count, AllocationType::kOld);
891 
892   Handle<TranslationArray> translation_array =
893       translations_.ToTranslationArray(isolate()->factory());
894 
895   data->SetTranslationByteArray(*translation_array);
896   data->SetInlinedFunctionCount(
897       Smi::FromInt(static_cast<int>(inlined_function_count_)));
898   data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
899 
900   data->SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_));
901   data->SetEagerDeoptCount(Smi::FromInt(eager_deopt_count_));
902   data->SetLazyDeoptCount(Smi::FromInt(lazy_deopt_count_));
903 
904   if (info->has_shared_info()) {
905     data->SetSharedFunctionInfo(*info->shared_info());
906   } else {
907     data->SetSharedFunctionInfo(Smi::zero());
908   }
909 
910   Handle<DeoptimizationLiteralArray> literals =
911       isolate()->factory()->NewDeoptimizationLiteralArray(
912           static_cast<int>(deoptimization_literals_.size()));
913   for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
914     Handle<Object> object = deoptimization_literals_[i].Reify(isolate());
915     CHECK(!object.is_null());
916     literals->set(i, *object);
917   }
918   data->SetLiteralArray(*literals);
919 
920   Handle<PodArray<InliningPosition>> inl_pos =
921       CreateInliningPositions(info, isolate());
922   data->SetInliningPositions(*inl_pos);
923 
924   if (info->is_osr()) {
925     DCHECK_LE(0, osr_pc_offset_);
926     data->SetOsrBytecodeOffset(Smi::FromInt(info_->osr_offset().ToInt()));
927     data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
928   } else {
929     BytecodeOffset osr_offset = BytecodeOffset::None();
930     data->SetOsrBytecodeOffset(Smi::FromInt(osr_offset.ToInt()));
931     data->SetOsrPcOffset(Smi::FromInt(-1));
932   }
933 
934   // Populate deoptimization entries.
935   for (int i = 0; i < deopt_count; i++) {
936     DeoptimizationExit* deoptimization_exit = deoptimization_exits_[i];
937     CHECK_NOT_NULL(deoptimization_exit);
938     DCHECK_EQ(i, deoptimization_exit->deoptimization_id());
939     data->SetBytecodeOffset(i, deoptimization_exit->bailout_id());
940     data->SetTranslationIndex(
941         i, Smi::FromInt(deoptimization_exit->translation_id()));
942     data->SetPc(i, Smi::FromInt(deoptimization_exit->pc_offset()));
943 #ifdef DEBUG
944     data->SetNodeId(i, Smi::FromInt(deoptimization_exit->node_id()));
945 #endif  // DEBUG
946   }
947 
948   return data;
949 }
950 
951 Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
952   jump_tables_ = zone()->New<JumpTable>(jump_tables_, targets, target_count);
953   return jump_tables_->label();
954 }
955 
956 void CodeGenerator::RecordCallPosition(Instruction* instr) {
957   const bool needs_frame_state =
958       instr->HasCallDescriptorFlag(CallDescriptor::kNeedsFrameState);
959   RecordSafepoint(instr->reference_map());
960 
961   if (instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler)) {
962     InstructionOperandConverter i(this, instr);
963     RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
964     DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
965     handlers_.push_back(
966         {GetLabel(handler_rpo), tasm()->pc_offset_for_safepoint()});
967   }
968 
969   if (needs_frame_state) {
970     MarkLazyDeoptSite();
971     // If the frame state is present, it starts at argument 1 - after
972     // the code address.
973     size_t frame_state_offset = 1;
974     FrameStateDescriptor* descriptor =
975         GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
976     int pc_offset = tasm()->pc_offset_for_safepoint();
977     BuildTranslation(instr, pc_offset, frame_state_offset, 0,
978                      descriptor->state_combine());
979   }
980 }
981 
982 int CodeGenerator::DefineDeoptimizationLiteral(DeoptimizationLiteral literal) {
983   literal.Validate();
984   int result = static_cast<int>(deoptimization_literals_.size());
985   for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
986     deoptimization_literals_[i].Validate();
987     if (deoptimization_literals_[i] == literal) return i;
988   }
989   deoptimization_literals_.push_back(literal);
990   return result;
991 }
992 
993 DeoptimizationEntry const& CodeGenerator::GetDeoptimizationEntry(
994     Instruction* instr, size_t frame_state_offset) {
995   InstructionOperandConverter i(this, instr);
996   int const state_id = i.InputInt32(frame_state_offset);
997   return instructions()->GetDeoptimizationEntry(state_id);
998 }
999 
1000 void CodeGenerator::TranslateStateValueDescriptor(
1001     StateValueDescriptor* desc, StateValueList* nested,
1002     InstructionOperandIterator* iter) {
1003   if (desc->IsNested()) {
1004     translations_.BeginCapturedObject(static_cast<int>(nested->size()));
1005     for (auto field : *nested) {
1006       TranslateStateValueDescriptor(field.desc, field.nested, iter);
1007     }
1008   } else if (desc->IsArgumentsElements()) {
1009     translations_.ArgumentsElements(desc->arguments_type());
1010   } else if (desc->IsArgumentsLength()) {
1011     translations_.ArgumentsLength();
1012   } else if (desc->IsDuplicate()) {
1013     translations_.DuplicateObject(static_cast<int>(desc->id()));
1014   } else if (desc->IsPlain()) {
1015     InstructionOperand* op = iter->Advance();
1016     AddTranslationForOperand(iter->instruction(), op, desc->type());
1017   } else {
1018     DCHECK(desc->IsOptimizedOut());
1019     if (optimized_out_literal_id_ == -1) {
1020       optimized_out_literal_id_ = DefineDeoptimizationLiteral(
1021           DeoptimizationLiteral(isolate()->factory()->optimized_out()));
1022     }
1023     translations_.StoreLiteral(optimized_out_literal_id_);
1024   }
1025 }
1026 
1027 void CodeGenerator::TranslateFrameStateDescriptorOperands(
1028     FrameStateDescriptor* desc, InstructionOperandIterator* iter) {
1029   size_t index = 0;
1030   StateValueList* values = desc->GetStateValueDescriptors();
1031   for (StateValueList::iterator it = values->begin(); it != values->end();
1032        ++it, ++index) {
1033     TranslateStateValueDescriptor((*it).desc, (*it).nested, iter);
1034   }
1035   DCHECK_EQ(desc->GetSize(), index);
1036 }
1037 
1038 void CodeGenerator::BuildTranslationForFrameStateDescriptor(
1039     FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
1040     OutputFrameStateCombine state_combine) {
1041   // Outer-most state must be added to translation first.
1042   if (descriptor->outer_state() != nullptr) {
1043     BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), iter,
1044                                             state_combine);
1045   }
1046 
1047   Handle<SharedFunctionInfo> shared_info;
1048   if (!descriptor->shared_info().ToHandle(&shared_info)) {
1049     if (!info()->has_shared_info()) {
1050       return;  // Stub with no SharedFunctionInfo.
1051     }
1052     shared_info = info()->shared_info();
1053   }
1054 
1055   const BytecodeOffset bailout_id = descriptor->bailout_id();
1056   const int shared_info_id =
1057       DefineDeoptimizationLiteral(DeoptimizationLiteral(shared_info));
1058   const unsigned int height =
1059       static_cast<unsigned int>(descriptor->GetHeight());
1060 
1061   switch (descriptor->type()) {
1062     case FrameStateType::kUnoptimizedFunction: {
1063       int return_offset = 0;
1064       int return_count = 0;
1065       if (!state_combine.IsOutputIgnored()) {
1066         return_offset = static_cast<int>(state_combine.GetOffsetToPokeAt());
1067         return_count = static_cast<int>(iter->instruction()->OutputCount());
1068       }
1069       translations_.BeginInterpretedFrame(bailout_id, shared_info_id, height,
1070                                           return_offset, return_count);
1071       break;
1072     }
1073     case FrameStateType::kArgumentsAdaptor:
1074       translations_.BeginArgumentsAdaptorFrame(shared_info_id, height);
1075       break;
1076     case FrameStateType::kConstructStub:
1077       DCHECK(bailout_id.IsValidForConstructStub());
1078       translations_.BeginConstructStubFrame(bailout_id, shared_info_id, height);
1079       break;
1080     case FrameStateType::kBuiltinContinuation: {
1081       translations_.BeginBuiltinContinuationFrame(bailout_id, shared_info_id,
1082                                                   height);
1083       break;
1084     }
1085 #if V8_ENABLE_WEBASSEMBLY
1086     case FrameStateType::kJSToWasmBuiltinContinuation: {
1087       const JSToWasmFrameStateDescriptor* js_to_wasm_descriptor =
1088           static_cast<const JSToWasmFrameStateDescriptor*>(descriptor);
1089       translations_.BeginJSToWasmBuiltinContinuationFrame(
1090           bailout_id, shared_info_id, height,
1091           js_to_wasm_descriptor->return_kind());
1092       break;
1093     }
1094 #endif  // V8_ENABLE_WEBASSEMBLY
1095     case FrameStateType::kJavaScriptBuiltinContinuation: {
1096       translations_.BeginJavaScriptBuiltinContinuationFrame(
1097           bailout_id, shared_info_id, height);
1098       break;
1099     }
1100     case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: {
1101       translations_.BeginJavaScriptBuiltinContinuationWithCatchFrame(
1102           bailout_id, shared_info_id, height);
1103       break;
1104     }
1105   }
1106 
1107   TranslateFrameStateDescriptorOperands(descriptor, iter);
1108 }
1109 
1110 DeoptimizationExit* CodeGenerator::BuildTranslation(
1111     Instruction* instr, int pc_offset, size_t frame_state_offset,
1112     size_t immediate_args_count, OutputFrameStateCombine state_combine) {
1113   DeoptimizationEntry const& entry =
1114       GetDeoptimizationEntry(instr, frame_state_offset);
1115   FrameStateDescriptor* const descriptor = entry.descriptor();
1116   frame_state_offset++;
1117 
1118   const int update_feedback_count = entry.feedback().IsValid() ? 1 : 0;
1119   const int translation_index = translations_.BeginTranslation(
1120       static_cast<int>(descriptor->GetFrameCount()),
1121       static_cast<int>(descriptor->GetJSFrameCount()), update_feedback_count);
1122   if (entry.feedback().IsValid()) {
1123     DeoptimizationLiteral literal =
1124         DeoptimizationLiteral(entry.feedback().vector);
1125     int literal_id = DefineDeoptimizationLiteral(literal);
1126     translations_.AddUpdateFeedback(literal_id, entry.feedback().slot.ToInt());
1127   }
1128   InstructionOperandIterator iter(instr, frame_state_offset);
1129   BuildTranslationForFrameStateDescriptor(descriptor, &iter, state_combine);
1130 
1131   DeoptimizationExit* const exit = zone()->New<DeoptimizationExit>(
1132       current_source_position_, descriptor->bailout_id(), translation_index,
1133       pc_offset, entry.kind(), entry.reason(),
1134 #ifdef DEBUG
1135       entry.node_id());
1136 #else   // DEBUG
1137       0);
1138 #endif  // DEBUG
1139   if (immediate_args_count != 0) {
1140     auto immediate_args = zone()->New<ZoneVector<ImmediateOperand*>>(zone());
1141     InstructionOperandIterator imm_iter(
1142         instr, frame_state_offset - immediate_args_count - 1);
1143     for (size_t i = 0; i < immediate_args_count; i++) {
1144       immediate_args->emplace_back(ImmediateOperand::cast(imm_iter.Advance()));
1145     }
1146     exit->set_immediate_args(immediate_args);
1147   }
1148 
1149   deoptimization_exits_.push_back(exit);
1150   return exit;
1151 }
1152 
1153 void CodeGenerator::AddTranslationForOperand(Instruction* instr,
1154                                              InstructionOperand* op,
1155                                              MachineType type) {
1156   if (op->IsStackSlot()) {
1157     if (type.representation() == MachineRepresentation::kBit) {
1158       translations_.StoreBoolStackSlot(LocationOperand::cast(op)->index());
1159     } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
1160                type == MachineType::Int32()) {
1161       translations_.StoreInt32StackSlot(LocationOperand::cast(op)->index());
1162     } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
1163                type == MachineType::Uint32()) {
1164       translations_.StoreUint32StackSlot(LocationOperand::cast(op)->index());
1165     } else if (type == MachineType::Int64()) {
1166       translations_.StoreInt64StackSlot(LocationOperand::cast(op)->index());
1167     } else {
1168 #if defined(V8_COMPRESS_POINTERS)
1169       CHECK(MachineRepresentation::kTagged == type.representation() ||
1170             MachineRepresentation::kCompressed == type.representation());
1171 #else
1172       CHECK(MachineRepresentation::kTagged == type.representation());
1173 #endif
1174       translations_.StoreStackSlot(LocationOperand::cast(op)->index());
1175     }
1176   } else if (op->IsFPStackSlot()) {
1177     if (type.representation() == MachineRepresentation::kFloat64) {
1178       translations_.StoreDoubleStackSlot(LocationOperand::cast(op)->index());
1179     } else {
1180       CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
1181       translations_.StoreFloatStackSlot(LocationOperand::cast(op)->index());
1182     }
1183   } else if (op->IsRegister()) {
1184     InstructionOperandConverter converter(this, instr);
1185     if (type.representation() == MachineRepresentation::kBit) {
1186       translations_.StoreBoolRegister(converter.ToRegister(op));
1187     } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
1188                type == MachineType::Int32()) {
1189       translations_.StoreInt32Register(converter.ToRegister(op));
1190     } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
1191                type == MachineType::Uint32()) {
1192       translations_.StoreUint32Register(converter.ToRegister(op));
1193     } else if (type == MachineType::Int64()) {
1194       translations_.StoreInt64Register(converter.ToRegister(op));
1195     } else {
1196 #if defined(V8_COMPRESS_POINTERS)
1197       CHECK(MachineRepresentation::kTagged == type.representation() ||
1198             MachineRepresentation::kCompressed == type.representation());
1199 #else
1200       CHECK(MachineRepresentation::kTagged == type.representation());
1201 #endif
1202       translations_.StoreRegister(converter.ToRegister(op));
1203     }
1204   } else if (op->IsFPRegister()) {
1205     InstructionOperandConverter converter(this, instr);
1206     if (type.representation() == MachineRepresentation::kFloat64) {
1207       translations_.StoreDoubleRegister(converter.ToDoubleRegister(op));
1208     } else {
1209       CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
1210       translations_.StoreFloatRegister(converter.ToFloatRegister(op));
1211     }
1212   } else {
1213     CHECK(op->IsImmediate());
1214     InstructionOperandConverter converter(this, instr);
1215     Constant constant = converter.ToConstant(op);
1216     DeoptimizationLiteral literal;
1217     switch (constant.type()) {
1218       case Constant::kInt32:
1219         if (type.representation() == MachineRepresentation::kTagged) {
1220           // When pointers are 4 bytes, we can use int32 constants to represent
1221           // Smis.
1222           DCHECK_EQ(4, kSystemPointerSize);
1223           Smi smi(static_cast<Address>(constant.ToInt32()));
1224           DCHECK(smi.IsSmi());
1225           literal = DeoptimizationLiteral(smi.value());
1226         } else if (type.representation() == MachineRepresentation::kBit) {
1227           if (constant.ToInt32() == 0) {
1228             literal =
1229                 DeoptimizationLiteral(isolate()->factory()->false_value());
1230           } else {
1231             DCHECK_EQ(1, constant.ToInt32());
1232             literal = DeoptimizationLiteral(isolate()->factory()->true_value());
1233           }
1234         } else {
1235           DCHECK(type == MachineType::Int32() ||
1236                  type == MachineType::Uint32() ||
1237                  type.representation() == MachineRepresentation::kWord32 ||
1238                  type.representation() == MachineRepresentation::kNone);
1239           DCHECK(type.representation() != MachineRepresentation::kNone ||
1240                  constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);
1241           if (type == MachineType::Uint32()) {
1242             literal = DeoptimizationLiteral(
1243                 static_cast<uint32_t>(constant.ToInt32()));
1244           } else {
1245             literal = DeoptimizationLiteral(constant.ToInt32());
1246           }
1247         }
1248         break;
1249       case Constant::kInt64:
1250         DCHECK_EQ(8, kSystemPointerSize);
1251         if (type.representation() == MachineRepresentation::kWord64) {
1252           literal =
1253               DeoptimizationLiteral(static_cast<double>(constant.ToInt64()));
1254         } else {
1255           // When pointers are 8 bytes, we can use int64 constants to represent
1256           // Smis.
1257           DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
1258           Smi smi(static_cast<Address>(constant.ToInt64()));
1259           DCHECK(smi.IsSmi());
1260           literal = DeoptimizationLiteral(smi.value());
1261         }
1262         break;
1263       case Constant::kFloat32:
1264         DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
1265                type.representation() == MachineRepresentation::kTagged);
1266         literal = DeoptimizationLiteral(constant.ToFloat32());
1267         break;
1268       case Constant::kFloat64:
1269         DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
1270                type.representation() == MachineRepresentation::kTagged);
1271         literal = DeoptimizationLiteral(constant.ToFloat64().value());
1272         break;
1273       case Constant::kHeapObject:
1274         DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
1275         literal = DeoptimizationLiteral(constant.ToHeapObject());
1276         break;
1277       case Constant::kCompressedHeapObject:
1278         DCHECK_EQ(MachineType::AnyTagged(), type);
1279         literal = DeoptimizationLiteral(constant.ToHeapObject());
1280         break;
1281       case Constant::kDelayedStringConstant:
1282         DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
1283         literal = DeoptimizationLiteral(constant.ToDelayedStringConstant());
1284         break;
1285       default:
1286         UNREACHABLE();
1287     }
1288     if (literal.object().equals(info()->closure()) &&
1289         info()->function_context_specializing()) {
1290       translations_.StoreJSFrameFunction();
1291     } else {
1292       int literal_id = DefineDeoptimizationLiteral(literal);
1293       translations_.StoreLiteral(literal_id);
1294     }
1295   }
1296 }
1297 
1298 void CodeGenerator::MarkLazyDeoptSite() {
1299   last_lazy_deopt_pc_ = tasm()->pc_offset();
1300 }
1301 
1302 DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
1303     Instruction* instr, size_t frame_state_offset,
1304     size_t immediate_args_count) {
1305   return BuildTranslation(instr, -1, frame_state_offset, immediate_args_count,
1306                           OutputFrameStateCombine::Ignore());
1307 }
1308 
1309 OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
1310     : frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
1311   gen->ools_ = this;
1312 }
1313 
1314 OutOfLineCode::~OutOfLineCode() = default;
1315 
1316 Handle<Object> DeoptimizationLiteral::Reify(Isolate* isolate) const {
1317   Validate();
1318   switch (kind_) {
1319     case DeoptimizationLiteralKind::kObject: {
1320       return object_;
1321     }
1322     case DeoptimizationLiteralKind::kNumber: {
1323       return isolate->factory()->NewNumber(number_);
1324     }
1325     case DeoptimizationLiteralKind::kString: {
1326       return string_->AllocateStringConstant(isolate);
1327     }
1328     case DeoptimizationLiteralKind::kInvalid: {
1329       UNREACHABLE();
1330     }
1331   }
1332   UNREACHABLE();
1333 }
1334 
1335 }  // namespace compiler
1336 }  // namespace internal
1337 }  // namespace v8
1338